1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2    Copyright (C) 1999-2020 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "alias.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "memmodel.h"
28 #include "tm_p.h"
29 #include "diagnostic-core.h"
30 #include "flags.h"
31 #include "ggc-internal.h"
32 #include "timevar.h"
33 #include "cgraph.h"
34 #include "cfgloop.h"
35 #include "plugin.h"
36 
37 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
38    file open.  Prefer either to valloc.  */
39 #ifdef HAVE_MMAP_ANON
40 # undef HAVE_MMAP_DEV_ZERO
41 # define USING_MMAP
42 #endif
43 
44 #ifdef HAVE_MMAP_DEV_ZERO
45 # define USING_MMAP
46 #endif
47 
48 #ifndef USING_MMAP
49 #define USING_MALLOC_PAGE_GROUPS
50 #endif
51 
52 #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
53     && defined(USING_MMAP)
54 # define USING_MADVISE
55 #endif
56 
57 /* Strategy:
58 
59    This garbage-collecting allocator allocates objects on one of a set
60    of pages.  Each page can allocate objects of a single size only;
61    available sizes are powers of two starting at four bytes.  The size
62    of an allocation request is rounded up to the next power of two
63    (`order'), and satisfied from the appropriate page.
64 
65    Each page is recorded in a page-entry, which also maintains an
66    in-use bitmap of object positions on the page.  This allows the
67    allocation state of a particular object to be flipped without
68    touching the page itself.
69 
70    Each page-entry also has a context depth, which is used to track
71    pushing and popping of allocation contexts.  Only objects allocated
72    in the current (highest-numbered) context may be collected.
73 
74    Page entries are arranged in an array of doubly-linked lists.  The
75    array is indexed by the allocation size, in bits, of the pages on
76    it; i.e. all pages on a list allocate objects of the same size.
77    Pages are ordered on the list such that all non-full pages precede
78    all full pages, with non-full pages arranged in order of decreasing
79    context depth.
80 
81    Empty pages (of all orders) are kept on a single page cache list,
82    and are considered first when new pages are required; they are
83    deallocated at the start of the next collection if they haven't
84    been recycled by then.  */
85 
86 /* Define GGC_DEBUG_LEVEL to print debugging information.
87      0: No debugging output.
88      1: GC statistics only.
89      2: Page-entry allocations/deallocations as well.
90      3: Object allocations as well.
91      4: Object marks as well.  */
92 #define GGC_DEBUG_LEVEL (0)
93 
94 /* A two-level tree is used to look up the page-entry for a given
95    pointer.  Two chunks of the pointer's bits are extracted to index
96    the first and second levels of the tree, as follows:
97 
98 				   HOST_PAGE_SIZE_BITS
99 			   32		|      |
100        msb +----------------+----+------+------+ lsb
101 			    |    |      |
102 			 PAGE_L1_BITS   |
103 				 |      |
104 			       PAGE_L2_BITS
105 
106    The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
107    pages are aligned on system page boundaries.  The next most
108    significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
109    index values in the lookup table, respectively.
110 
111    For 32-bit architectures and the settings below, there are no
112    leftover bits.  For architectures with wider pointers, the lookup
113    tree points to a list of page tables, keyed by the pointer's high
114    bits, which must be scanned to find the correct one.  */
115 
116 #define PAGE_L1_BITS	(8)
117 #define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
118 #define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
119 #define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)
120 
121 #define LOOKUP_L1(p) \
122   (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
123 
124 #define LOOKUP_L2(p) \
125   (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
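
/* For example, assuming a 4K page size (G.lg_pagesize == 12, so
   PAGE_L2_BITS == 12): for the 32-bit pointer 0x0804a123 the low 12 bits
   (0x123) are the offset within the page and are ignored, LOOKUP_L2
   extracts bits 12..23 (0x04a) and LOOKUP_L1 extracts bits 24..31 (0x08),
   so the page-entry lives at base[0x08][0x04a].  */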
126 
127 /* The number of objects per allocation page, for objects on a page of
128    the indicated ORDER.  */
129 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
130 
131 /* The number of objects in P.  */
132 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
133 
134 /* The size of an object on a page of the indicated ORDER.  */
135 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
136 
137 /* For speed, we avoid doing a general integer divide to locate the
138    offset in the allocation bitmap, by precalculating numbers M, S
139    such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
140    within the page which is evenly divisible by the object size Z.  */
141 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
142 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
143 #define OFFSET_TO_BIT(OFFSET, ORDER) \
144   (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
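
/* For illustration, working modulo 2^32: if OBJECT_SIZE (ORDER) is
   24 == 8 * 3, then DIV_SHIFT (ORDER) is 3 and DIV_MULT (ORDER) is
   0xAAAAAAAB, the multiplicative inverse of 3.  An offset of 48 maps to
   (48 * 0xAAAAAAAB) >> 3 == 16 >> 3 == 2 == 48 / 24, with no division
   instruction.  See compute_inverse below.  */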
145 
146 /* We use this structure to determine the alignment required for
147    allocations.  For power-of-two sized allocations, that's not a
148    problem, but it does matter for odd-sized allocations.
149    We do not care about alignment for floating-point types.  */
150 
151 struct max_alignment {
152   char c;
153   union {
154     int64_t i;
155     void *p;
156   } u;
157 };
158 
159 /* The biggest alignment required.  */
160 
161 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
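
/* On typical LP64 hosts both int64_t and void * require 8-byte
   alignment, so MAX_ALIGNMENT is usually 8 and the first few extra
   orders below are 24, 40, 48, ... bytes.  */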
162 
163 
164 /* The number of extra orders, not corresponding to power-of-two sized
165    objects.  */
166 
167 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
168 
169 #define RTL_SIZE(NSLOTS) \
170   (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
171 
172 #define TREE_EXP_SIZE(OPS) \
173   (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
174 
175 /* The Ith entry is the maximum size of an object to be stored in the
176    Ith extra order.  Adding a new entry to this array is the *only*
177    thing you need to do to add a new special allocation size.  */
178 
179 static const size_t extra_order_size_table[] = {
180   /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
181      There are a lot of structures with these sizes and explicitly
182      listing them risks orders being dropped because they changed size.  */
183   MAX_ALIGNMENT * 3,
184   MAX_ALIGNMENT * 5,
185   MAX_ALIGNMENT * 6,
186   MAX_ALIGNMENT * 7,
187   MAX_ALIGNMENT * 9,
188   MAX_ALIGNMENT * 10,
189   MAX_ALIGNMENT * 11,
190   MAX_ALIGNMENT * 12,
191   MAX_ALIGNMENT * 13,
192   MAX_ALIGNMENT * 14,
193   MAX_ALIGNMENT * 15,
194   sizeof (struct tree_decl_non_common),
195   sizeof (struct tree_field_decl),
196   sizeof (struct tree_parm_decl),
197   sizeof (struct tree_var_decl),
198   sizeof (struct tree_type_non_common),
199   sizeof (struct function),
200   sizeof (struct basic_block_def),
201   sizeof (struct cgraph_node),
202   sizeof (class loop),
203 };
204 
205 /* The total number of orders.  */
206 
207 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
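
/* E.g. with 64-bit pointers and the 20 entries in
   extra_order_size_table above, NUM_ORDERS is 84.  */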
208 
209 /* Compute the smallest nonnegative number which when added to X gives
210    a multiple of F.  */
211 
212 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
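
/* For example, ROUND_UP_VALUE (10, 8) is 7 - ((7 + 10) % 8) == 6,
   and 10 + 6 == 16 is the next multiple of 8.  */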
213 
214 /* Round X to the next multiple of the page size.  */
215 
216 #define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
217 
218 /* The Ith entry is the number of objects on a page of order I.  */
219 
220 static unsigned objects_per_page_table[NUM_ORDERS];
221 
222 /* The Ith entry is the size of an object on a page of order I.  */
223 
224 static size_t object_size_table[NUM_ORDERS];
225 
226 /* The Ith entry is a pair of numbers (mult, shift) such that
227    ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
228    for all k evenly divisible by OBJECT_SIZE(I).  */
229 
230 static struct
231 {
232   size_t mult;
233   unsigned int shift;
234 }
235 inverse_table[NUM_ORDERS];
236 
237 /* A page_entry records the status of an allocation page.  This
238    structure is dynamically sized to fit the bitmap in_use_p.  */
239 struct page_entry
240 {
241   /* The next page-entry with objects of the same size, or NULL if
242      this is the last page-entry.  */
243   struct page_entry *next;
244 
245   /* The previous page-entry with objects of the same size, or NULL if
246      this is the first page-entry.   The PREV pointer exists solely to
247      keep the cost of ggc_free manageable.  */
248   struct page_entry *prev;
249 
250   /* The number of bytes allocated.  (This will always be a multiple
251      of the host system page size.)  */
252   size_t bytes;
253 
254   /* The address at which the memory is allocated.  */
255   char *page;
256 
257 #ifdef USING_MALLOC_PAGE_GROUPS
258   /* Back pointer to the page group this page came from.  */
259   struct page_group *group;
260 #endif
261 
262   /* This is the index in the by_depth varray where this page entry
263      can be found.  */
264   unsigned long index_by_depth;
265 
266   /* Context depth of this page.  */
267   unsigned short context_depth;
268 
269   /* The number of free objects remaining on this page.  */
270   unsigned short num_free_objects;
271 
272   /* A likely candidate for the bit position of a free object for the
273      next allocation from this page.  */
274   unsigned short next_bit_hint;
275 
276   /* The lg of the size of objects allocated from this page.  */
277   unsigned char order;
278 
279   /* Discarded page? */
280   bool discarded;
281 
282   /* A bit vector indicating whether or not objects are in use.  The
283      Nth bit is one if the Nth object on this page is allocated.  This
284      array is dynamically sized.  */
285   unsigned long in_use_p[1];
286 };
287 
288 #ifdef USING_MALLOC_PAGE_GROUPS
289 /* A page_group describes a large allocation from malloc, from which
290    we parcel out aligned pages.  */
291 struct page_group
292 {
293   /* A linked list of all extant page groups.  */
294   struct page_group *next;
295 
296   /* The address we received from malloc.  */
297   char *allocation;
298 
299   /* The size of the block.  */
300   size_t alloc_size;
301 
302   /* A bitmask of pages in use.  */
303   unsigned int in_use;
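
  /* The width of this mask is what limits GGC_QUIRE_SIZE to
     HOST_BITS_PER_INT on the malloc path; see the definition of
     GGC_QUIRE_SIZE below.  */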
304 };
305 #endif
306 
307 #if HOST_BITS_PER_PTR <= 32
308 
309 /* On 32-bit hosts, we use a two level page table, as pictured above.  */
310 typedef page_entry **page_table[PAGE_L1_SIZE];
311 
312 #else
313 
314 /* On 64-bit hosts, we use the same two level page tables plus a linked
315    list that disambiguates the top 32-bits.  There will almost always be
316    exactly one entry in the list.  */
317 typedef struct page_table_chain
318 {
319   struct page_table_chain *next;
320   size_t high_bits;
321   page_entry **table[PAGE_L1_SIZE];
322 } *page_table;
323 
324 #endif
325 
326 class finalizer
327 {
328 public:
329   finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
330 
331   void *addr () const { return m_addr; }
332 
333   void call () const { m_function (m_addr); }
334 
335 private:
336   void *m_addr;
337   void (*m_function)(void *);
338 };
339 
340 class vec_finalizer
341 {
342 public:
343   vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
344     m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
345 
346   void call () const
347     {
348       for (size_t i = 0; i < m_n_objects; i++)
349 	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
350     }
351 
352   void *addr () const { return reinterpret_cast<void *> (m_addr); }
353 
354 private:
355   uintptr_t m_addr;
356   void (*m_function)(void *);
357   size_t m_object_size;
358   size_t m_n_objects;
359 };
360 
361 #ifdef ENABLE_GC_ALWAYS_COLLECT
362 /* List of free objects to be verified as actually free on the
363    next collection.  */
364 struct free_object
365 {
366   void *object;
367   struct free_object *next;
368 };
369 #endif
370 
371 /* The rest of the global variables.  */
372 static struct ggc_globals
373 {
374   /* The Nth element in this array is a page with objects of size 2^N.
375      If there are any pages with free objects, they will be at the
376      head of the list.  NULL if there are no page-entries for this
377      object size.  */
378   page_entry *pages[NUM_ORDERS];
379 
380   /* The Nth element in this array is the last page with objects of
381      size 2^N.  NULL if there are no page-entries for this object
382      size.  */
383   page_entry *page_tails[NUM_ORDERS];
384 
385   /* Lookup table for associating allocation pages with object addresses.  */
386   page_table lookup;
387 
388   /* The system's page size.  */
389   size_t pagesize;
390   size_t lg_pagesize;
391 
392   /* Bytes currently allocated.  */
393   size_t allocated;
394 
395   /* Bytes currently allocated at the end of the last collection.  */
396   size_t allocated_last_gc;
397 
398   /* Total amount of memory mapped.  */
399   size_t bytes_mapped;
400 
401   /* Bit N set if any allocations have been done at context depth N.  */
402   unsigned long context_depth_allocations;
403 
404   /* Bit N set if any collections have been done at context depth N.  */
405   unsigned long context_depth_collections;
406 
407   /* The current depth in the context stack.  */
408   unsigned short context_depth;
409 
410   /* A file descriptor open to /dev/zero for reading.  */
411 #if defined (HAVE_MMAP_DEV_ZERO)
412   int dev_zero_fd;
413 #endif
414 
415   /* A cache of free system pages.  */
416   page_entry *free_pages;
417 
418 #ifdef USING_MALLOC_PAGE_GROUPS
419   page_group *page_groups;
420 #endif
421 
422   /* The file used for debugging output.  */
423   FILE *debug_file;
424 
425   /* Current number of elements in use in depth below.  */
426   unsigned int depth_in_use;
427 
428   /* Maximum number of elements that can be used before resizing.  */
429   unsigned int depth_max;
430 
431   /* Each element of this array is an index in by_depth where the given
432      depth starts.  This structure is indexed by the context depth we
433      are interested in.  */
434   unsigned int *depth;
435 
436   /* Current number of elements in use in by_depth below.  */
437   unsigned int by_depth_in_use;
438 
439   /* Maximum number of elements that can be used before resizing.  */
440   unsigned int by_depth_max;
441 
442   /* Each element of this array is a pointer to a page_entry, all
443      page_entries can be found in here by increasing depth.
444      index_by_depth in the page_entry is the index into this data
445      structure where that page_entry can be found.  This is used to
446      speed up finding all page_entries at a particular depth.  */
447   page_entry **by_depth;
448 
449   /* Each element is a pointer to the saved in_use_p bits, if any,
450      zero otherwise.  We allocate them all together, to enable a
451      better runtime data access pattern.  */
452   unsigned long **save_in_use;
453 
454   /* Finalizers for single objects.  The first index is collection_depth.  */
455   vec<vec<finalizer> > finalizers;
456 
457   /* Finalizers for vectors of objects.  */
458   vec<vec<vec_finalizer> > vec_finalizers;
459 
460 #ifdef ENABLE_GC_ALWAYS_COLLECT
461   /* List of free objects to be verified as actually free on the
462      next collection.  */
463   struct free_object *free_object_list;
464 #endif
465 
466   struct
467   {
468     /* Total GC-allocated memory.  */
469     unsigned long long total_allocated;
470     /* Total overhead for GC-allocated memory.  */
471     unsigned long long total_overhead;
472 
473     /* Total allocations and overhead for sizes less than 32, 64 and 128.
474        These sizes are interesting because they are typical cache line
475        sizes.  */
476 
477     unsigned long long total_allocated_under32;
478     unsigned long long total_overhead_under32;
479 
480     unsigned long long total_allocated_under64;
481     unsigned long long total_overhead_under64;
482 
483     unsigned long long total_allocated_under128;
484     unsigned long long total_overhead_under128;
485 
486     /* The allocations for each of the allocation orders.  */
487     unsigned long long total_allocated_per_order[NUM_ORDERS];
488 
489     /* The overhead for each of the allocation orders.  */
490     unsigned long long total_overhead_per_order[NUM_ORDERS];
491   } stats;
492 } G;
493 
494 /* True if a gc is currently taking place.  */
495 
496 static bool in_gc = false;
497 
498 /* The size in bytes required to maintain a bitmap for the objects
499    on a page-entry.  */
500 #define BITMAP_SIZE(Num_objects) \
501   (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
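
/* As a concrete sketch: with 4K pages and 8-byte objects (order 3)
   there are 512 objects per page; alloc_page asks for 512 + 1 bits
   (the extra bit is the one-past-the-end sentinel), which with 64-bit
   longs rounds up to 9 longs, i.e. 72 bytes of bitmap per page.  */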
502 
503 /* Allocate pages in chunks of this size, to throttle calls to memory
504    allocation routines.  The first page is used, the rest go onto the
505    free list.  This cannot be larger than HOST_BITS_PER_INT for the
506    in_use bitmask for page_group.  Hosts that need a different value
507    can override this by defining GGC_QUIRE_SIZE explicitly.  */
508 #ifndef GGC_QUIRE_SIZE
509 # ifdef USING_MMAP
510 #  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
511 # else
512 #  define GGC_QUIRE_SIZE 16
513 # endif
514 #endif
515 
516 /* Initial guess as to how many page table entries we might need.  */
517 #define INITIAL_PTE_COUNT 128
518 
519 static page_entry *lookup_page_table_entry (const void *);
520 static void set_page_table_entry (void *, page_entry *);
521 #ifdef USING_MMAP
522 static char *alloc_anon (char *, size_t, bool check);
523 #endif
524 #ifdef USING_MALLOC_PAGE_GROUPS
525 static size_t page_group_index (char *, char *);
526 static void set_page_group_in_use (page_group *, char *);
527 static void clear_page_group_in_use (page_group *, char *);
528 #endif
529 static struct page_entry * alloc_page (unsigned);
530 static void free_page (struct page_entry *);
531 static void clear_marks (void);
532 static void sweep_pages (void);
533 static void ggc_recalculate_in_use_p (page_entry *);
534 static void compute_inverse (unsigned);
535 static inline void adjust_depth (void);
536 static void move_ptes_to_front (int, int);
537 
538 void debug_print_page_list (int);
539 static void push_depth (unsigned int);
540 static void push_by_depth (page_entry *, unsigned long *);
541 
542 /* Push an entry onto G.depth.  */
543 
544 inline static void
545 push_depth (unsigned int i)
546 {
547   if (G.depth_in_use >= G.depth_max)
548     {
549       G.depth_max *= 2;
550       G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
551     }
552   G.depth[G.depth_in_use++] = i;
553 }
554 
555 /* Push an entry onto G.by_depth and G.save_in_use.  */
556 
557 inline static void
558 push_by_depth (page_entry *p, unsigned long *s)
559 {
560   if (G.by_depth_in_use >= G.by_depth_max)
561     {
562       G.by_depth_max *= 2;
563       G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
564       G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
565 				  G.by_depth_max);
566     }
567   G.by_depth[G.by_depth_in_use] = p;
568   G.save_in_use[G.by_depth_in_use++] = s;
569 }
570 
571 #if (GCC_VERSION < 3001)
572 #define prefetch(X) ((void) X)
573 #else
574 #define prefetch(X) __builtin_prefetch (X)
575 #endif
576 
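/* Accessors for the in_use_p bits saved for a page, if any; indexed by
   the page's position in G.by_depth (see G.save_in_use above).  */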
577 #define save_in_use_p_i(__i) \
578   (G.save_in_use[__i])
579 #define save_in_use_p(__p) \
580   (save_in_use_p_i (__p->index_by_depth))
581 
582 /* Traverse the page table and find the entry for a page.
583    If the object was not allocated by the GC, return NULL.  */
584 
585 static inline page_entry *
586 safe_lookup_page_table_entry (const void *p)
587 {
588   page_entry ***base;
589   size_t L1, L2;
590 
591 #if HOST_BITS_PER_PTR <= 32
592   base = &G.lookup[0];
593 #else
594   page_table table = G.lookup;
595   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
596   while (1)
597     {
598       if (table == NULL)
599 	return NULL;
600       if (table->high_bits == high_bits)
601 	break;
602       table = table->next;
603     }
604   base = &table->table[0];
605 #endif
606 
607   /* Extract the level 1 and 2 indices.  */
608   L1 = LOOKUP_L1 (p);
609   L2 = LOOKUP_L2 (p);
610   if (! base[L1])
611     return NULL;
612 
613   return base[L1][L2];
614 }
615 
616 /* Traverse the page table and find the entry for a page.
617    Die (probably) if the object wasn't allocated via GC.  */
618 
619 static inline page_entry *
620 lookup_page_table_entry (const void *p)
621 {
622   page_entry ***base;
623   size_t L1, L2;
624 
625 #if HOST_BITS_PER_PTR <= 32
626   base = &G.lookup[0];
627 #else
628   page_table table = G.lookup;
629   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
630   while (table->high_bits != high_bits)
631     table = table->next;
632   base = &table->table[0];
633 #endif
634 
635   /* Extract the level 1 and 2 indices.  */
636   L1 = LOOKUP_L1 (p);
637   L2 = LOOKUP_L2 (p);
638 
639   return base[L1][L2];
640 }
641 
642 /* Set the page table entry for a page.  */
643 
644 static void
645 set_page_table_entry (void *p, page_entry *entry)
646 {
647   page_entry ***base;
648   size_t L1, L2;
649 
650 #if HOST_BITS_PER_PTR <= 32
651   base = &G.lookup[0];
652 #else
653   page_table table;
654   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
655   for (table = G.lookup; table; table = table->next)
656     if (table->high_bits == high_bits)
657       goto found;
658 
659   /* Not found -- allocate a new table.  */
660   table = XCNEW (struct page_table_chain);
661   table->next = G.lookup;
662   table->high_bits = high_bits;
663   G.lookup = table;
664 found:
665   base = &table->table[0];
666 #endif
667 
668   /* Extract the level 1 and 2 indices.  */
669   L1 = LOOKUP_L1 (p);
670   L2 = LOOKUP_L2 (p);
671 
672   if (base[L1] == NULL)
673     base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
674 
675   base[L1][L2] = entry;
676 }
677 
678 /* Prints the page-entry for object size ORDER, for debugging.  */
679 
680 DEBUG_FUNCTION void
681 debug_print_page_list (int order)
682 {
683   page_entry *p;
684   printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
685 	  (void *) G.page_tails[order]);
686   p = G.pages[order];
687   while (p != NULL)
688     {
689       printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
690 	      p->num_free_objects);
691       p = p->next;
692     }
693   printf ("NULL\n");
694   fflush (stdout);
695 }
696 
697 #ifdef USING_MMAP
698 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
699    (if non-null).  The ifdef structure here is intended to cause a
700    compile error unless exactly one of the HAVE_* is defined.  */
701 
702 static inline char *
703 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
704 {
705 #ifdef HAVE_MMAP_ANON
706   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
707 			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
708 #endif
709 #ifdef HAVE_MMAP_DEV_ZERO
710   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
711 			      MAP_PRIVATE, G.dev_zero_fd, 0);
712 #endif
713 
714   if (page == (char *) MAP_FAILED)
715     {
716       if (!check)
717         return NULL;
718       perror ("virtual memory exhausted");
719       exit (FATAL_EXIT_CODE);
720     }
721 
722   /* Remember that we allocated this memory.  */
723   G.bytes_mapped += size;
724 
725   /* Pretend we don't have access to the allocated pages.  We'll enable
726      access to smaller pieces of the area in ggc_internal_alloc.  Discard the
727      handle to avoid handle leak.  */
728   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
729 
730   return page;
731 }
732 #endif
733 #ifdef USING_MALLOC_PAGE_GROUPS
734 /* Compute the index for this page into the page group.  */
735 
736 static inline size_t
737 page_group_index (char *allocation, char *page)
738 {
739   return (size_t) (page - allocation) >> G.lg_pagesize;
740 }
741 
742 /* Set and clear the in_use bit for this page in the page group.  */
743 
744 static inline void
745 set_page_group_in_use (page_group *group, char *page)
746 {
747   group->in_use |= 1 << page_group_index (group->allocation, page);
748 }
749 
750 static inline void
751 clear_page_group_in_use (page_group *group, char *page)
752 {
753   group->in_use &= ~(1 << page_group_index (group->allocation, page));
754 }
755 #endif
756 
757 /* Allocate a new page for allocating objects of size 2^ORDER,
758    and return an entry for it.  The entry is not added to the
759    appropriate page_table list.  */
760 
761 static inline struct page_entry *
762 alloc_page (unsigned order)
763 {
764   struct page_entry *entry, *p, **pp;
765   char *page;
766   size_t num_objects;
767   size_t bitmap_size;
768   size_t page_entry_size;
769   size_t entry_size;
770 #ifdef USING_MALLOC_PAGE_GROUPS
771   page_group *group;
772 #endif
773 
774   num_objects = OBJECTS_PER_PAGE (order);
775   bitmap_size = BITMAP_SIZE (num_objects + 1);
776   page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
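  /* Note that struct page_entry ends with a one-element in_use_p array;
     the "- sizeof (long)" drops that placeholder so that the real
     bitmap (sized for num_objects + 1 bits, the extra bit being the
     sentinel set below) can be appended in its place.  */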
777   entry_size = num_objects * OBJECT_SIZE (order);
778   if (entry_size < G.pagesize)
779     entry_size = G.pagesize;
780   entry_size = PAGE_ALIGN (entry_size);
781 
782   entry = NULL;
783   page = NULL;
784 
785   /* Check the list of free pages for one we can use.  */
786   for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
787     if (p->bytes == entry_size)
788       break;
789 
790   if (p != NULL)
791     {
792       if (p->discarded)
793         G.bytes_mapped += p->bytes;
794       p->discarded = false;
795 
796       /* Recycle the allocated memory from this page ...  */
797       *pp = p->next;
798       page = p->page;
799 
800 #ifdef USING_MALLOC_PAGE_GROUPS
801       group = p->group;
802 #endif
803 
804       /* ... and, if possible, the page entry itself.  */
805       if (p->order == order)
806 	{
807 	  entry = p;
808 	  memset (entry, 0, page_entry_size);
809 	}
810       else
811 	free (p);
812     }
813 #ifdef USING_MMAP
814   else if (entry_size == G.pagesize)
815     {
816       /* We want just one page.  Allocate a bunch of them and put the
817 	 extras on the freelist.  (Can only do this optimization with
818 	 mmap for backing store.)  */
819       struct page_entry *e, *f = G.free_pages;
820       int i, entries = GGC_QUIRE_SIZE;
821 
822       page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
823       if (page == NULL)
824      	{
825 	  page = alloc_anon (NULL, G.pagesize, true);
826           entries = 1;
827 	}
828 
829       /* This loop counts down so that the chain will be in ascending
830 	 memory order.  */
831       for (i = entries - 1; i >= 1; i--)
832 	{
833 	  e = XCNEWVAR (struct page_entry, page_entry_size);
834 	  e->order = order;
835 	  e->bytes = G.pagesize;
836 	  e->page = page + (i << G.lg_pagesize);
837 	  e->next = f;
838 	  f = e;
839 	}
840 
841       G.free_pages = f;
842     }
843   else
844     page = alloc_anon (NULL, entry_size, true);
845 #endif
846 #ifdef USING_MALLOC_PAGE_GROUPS
847   else
848     {
849       /* Allocate a large block of memory and serve out the aligned
850 	 pages therein.  This results in much less memory wastage
851 	 than the traditional implementation of valloc.  */
852 
853       char *allocation, *a, *enda;
854       size_t alloc_size, head_slop, tail_slop;
855       int multiple_pages = (entry_size == G.pagesize);
856 
857       if (multiple_pages)
858 	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
859       else
860 	alloc_size = entry_size + G.pagesize - 1;
861       allocation = XNEWVEC (char, alloc_size);
862 
863       page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
864       head_slop = page - allocation;
865       if (multiple_pages)
866 	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
867       else
868 	tail_slop = alloc_size - entry_size - head_slop;
869       enda = allocation + alloc_size - tail_slop;
870 
871       /* We allocated N pages, which are likely not aligned, leaving
872 	 us with N-1 usable pages.  We plan to place the page_group
873 	 structure somewhere in the slop.  */
874       if (head_slop >= sizeof (page_group))
875 	group = (page_group *)page - 1;
876       else
877 	{
878 	  /* We magically got an aligned allocation.  Too bad, we have
879 	     to waste a page anyway.  */
880 	  if (tail_slop == 0)
881 	    {
882 	      enda -= G.pagesize;
883 	      tail_slop += G.pagesize;
884 	    }
885 	  gcc_assert (tail_slop >= sizeof (page_group));
886 	  group = (page_group *)enda;
887 	  tail_slop -= sizeof (page_group);
888 	}
889 
890       /* Remember that we allocated this memory.  */
891       group->next = G.page_groups;
892       group->allocation = allocation;
893       group->alloc_size = alloc_size;
894       group->in_use = 0;
895       G.page_groups = group;
896       G.bytes_mapped += alloc_size;
897 
898       /* If we allocated multiple pages, put the rest on the free list.  */
899       if (multiple_pages)
900 	{
901 	  struct page_entry *e, *f = G.free_pages;
902 	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
903 	    {
904 	      e = XCNEWVAR (struct page_entry, page_entry_size);
905 	      e->order = order;
906 	      e->bytes = G.pagesize;
907 	      e->page = a;
908 	      e->group = group;
909 	      e->next = f;
910 	      f = e;
911 	    }
912 	  G.free_pages = f;
913 	}
914     }
915 #endif
916 
917   if (entry == NULL)
918     entry = XCNEWVAR (struct page_entry, page_entry_size);
919 
920   entry->bytes = entry_size;
921   entry->page = page;
922   entry->context_depth = G.context_depth;
923   entry->order = order;
924   entry->num_free_objects = num_objects;
925   entry->next_bit_hint = 1;
926 
927   G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
928 
929 #ifdef USING_MALLOC_PAGE_GROUPS
930   entry->group = group;
931   set_page_group_in_use (group, page);
932 #endif
933 
934   /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
935      increment the hint.  */
936   entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
937     = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
938 
939   set_page_table_entry (page, entry);
940 
941   if (GGC_DEBUG_LEVEL >= 2)
942     fprintf (G.debug_file,
943 	     "Allocating page at %p, object size=%lu, data %p-%p\n",
944 	     (void *) entry, (unsigned long) OBJECT_SIZE (order),
945 	     (void *) page, (void *) (page + entry_size - 1));
946 
947   return entry;
948 }
949 
950 /* Adjust the size of G.depth so that no index greater than the one
951    used by the page on top of G.by_depth is used.  */
952 
953 static inline void
954 adjust_depth (void)
955 {
956   page_entry *top;
957 
958   if (G.by_depth_in_use)
959     {
960       top = G.by_depth[G.by_depth_in_use-1];
961 
962       /* Peel back indices in depth that index into by_depth, so that
963 	 as new elements are added to by_depth, we note the indices
964 	 of those elements, if they are for new context depths.  */
965       while (G.depth_in_use > (size_t)top->context_depth+1)
966 	--G.depth_in_use;
967     }
968 }
969 
970 /* For a page that is no longer needed, put it on the free page list.  */
971 
972 static void
973 free_page (page_entry *entry)
974 {
975   if (GGC_DEBUG_LEVEL >= 2)
976     fprintf (G.debug_file,
977 	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
978 	     (void *) entry->page, (void *) (entry->page + entry->bytes - 1));
979 
980   /* Mark the page as inaccessible.  Discard the handle to avoid handle
981      leak.  */
982   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
983 
984   set_page_table_entry (entry->page, NULL);
985 
986 #ifdef USING_MALLOC_PAGE_GROUPS
987   clear_page_group_in_use (entry->group, entry->page);
988 #endif
989 
990   if (G.by_depth_in_use > 1)
991     {
992       page_entry *top = G.by_depth[G.by_depth_in_use-1];
993       int i = entry->index_by_depth;
994 
995       /* We cannot free a page from a context deeper than the current
996 	 one.  */
997       gcc_assert (entry->context_depth == top->context_depth);
998 
999       /* Put top element into freed slot.  */
1000       G.by_depth[i] = top;
1001       G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
1002       top->index_by_depth = i;
1003     }
1004   --G.by_depth_in_use;
1005 
1006   adjust_depth ();
1007 
1008   entry->next = G.free_pages;
1009   G.free_pages = entry;
1010 }
1011 
1012 /* Release the free page cache to the system.  */
1013 
1014 static void
1015 release_pages (void)
1016 {
1017   size_t n1 = 0;
1018   size_t n2 = 0;
1019 #ifdef USING_MADVISE
1020   page_entry *p, *start_p;
1021   char *start;
1022   size_t len;
1023   size_t mapped_len;
1024   page_entry *next, *prev, *newprev;
1025   size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
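  /* With the defaults above (4K pages, GGC_QUIRE_SIZE of 512), free_unit
     is 1MB; only runs of free pages at least this large are unmapped
     here, the remaining pages are merely madvise'd below.  */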
1026 
1027   /* First free larger contiguous areas to the OS.
1028      This allows other allocators to grab these areas if needed.
1029      This is only done on larger chunks to avoid fragmentation.
1030      This does not always work because the free_pages list is only
1031      approximately sorted. */
1032 
1033   p = G.free_pages;
1034   prev = NULL;
1035   while (p)
1036     {
1037       start = p->page;
1038       start_p = p;
1039       len = 0;
1040       mapped_len = 0;
1041       newprev = prev;
1042       while (p && p->page == start + len)
1043         {
1044           len += p->bytes;
1045 	  if (!p->discarded)
1046 	      mapped_len += p->bytes;
1047 	  newprev = p;
1048           p = p->next;
1049         }
1050       if (len >= free_unit)
1051         {
1052           while (start_p != p)
1053             {
1054               next = start_p->next;
1055               free (start_p);
1056               start_p = next;
1057             }
1058           munmap (start, len);
1059 	  if (prev)
1060 	    prev->next = p;
1061           else
1062             G.free_pages = p;
1063           G.bytes_mapped -= mapped_len;
1064 	  n1 += len;
1065 	  continue;
1066         }
1067       prev = newprev;
1068    }
1069 
1070   /* Now give back the fragmented pages to the OS, but keep the address
1071      space to reuse it next time. */
1072 
1073   for (p = G.free_pages; p; )
1074     {
1075       if (p->discarded)
1076         {
1077           p = p->next;
1078           continue;
1079         }
1080       start = p->page;
1081       len = p->bytes;
1082       start_p = p;
1083       p = p->next;
1084       while (p && p->page == start + len)
1085         {
1086           len += p->bytes;
1087           p = p->next;
1088         }
1089       /* Give the page back to the kernel, but don't free the mapping.
1090          This avoids fragmentation in the virtual memory map of the
1091  	 process. Next time we can reuse it by just touching it. */
1092       madvise (start, len, MADV_DONTNEED);
1093       /* Don't count those pages as mapped, so that the garbage collector
1094          is not perturbed unnecessarily. */
1095       G.bytes_mapped -= len;
1096       n2 += len;
1097       while (start_p != p)
1098         {
1099           start_p->discarded = true;
1100           start_p = start_p->next;
1101         }
1102     }
1103 #endif
1104 #if defined(USING_MMAP) && !defined(USING_MADVISE)
1105   page_entry *p, *next;
1106   char *start;
1107   size_t len;
1108 
1109   /* Gather up adjacent pages so they are unmapped together.  */
1110   p = G.free_pages;
1111 
1112   while (p)
1113     {
1114       start = p->page;
1115       next = p->next;
1116       len = p->bytes;
1117       free (p);
1118       p = next;
1119 
1120       while (p && p->page == start + len)
1121 	{
1122 	  next = p->next;
1123 	  len += p->bytes;
1124 	  free (p);
1125 	  p = next;
1126 	}
1127 
1128       munmap (start, len);
1129       n1 += len;
1130       G.bytes_mapped -= len;
1131     }
1132 
1133   G.free_pages = NULL;
1134 #endif
1135 #ifdef USING_MALLOC_PAGE_GROUPS
1136   page_entry **pp, *p;
1137   page_group **gp, *g;
1138 
1139   /* Remove all pages from free page groups from the list.  */
1140   pp = &G.free_pages;
1141   while ((p = *pp) != NULL)
1142     if (p->group->in_use == 0)
1143       {
1144 	*pp = p->next;
1145 	free (p);
1146       }
1147     else
1148       pp = &p->next;
1149 
1150   /* Remove all free page groups, and release the storage.  */
1151   gp = &G.page_groups;
1152   while ((g = *gp) != NULL)
1153     if (g->in_use == 0)
1154       {
1155 	*gp = g->next;
1156 	G.bytes_mapped -= g->alloc_size;
1157 	n1 += g->alloc_size;
1158 	free (g->allocation);
1159       }
1160     else
1161       gp = &g->next;
1162 #endif
1163   if (!quiet_flag && (n1 || n2))
1164     {
1165       fprintf (stderr, " {GC");
1166       if (n1)
1167 	fprintf (stderr, " released %luk", (unsigned long)(n1 / 1024));
1168       if (n2)
1169 	fprintf (stderr, " madv_dontneed %luk", (unsigned long)(n2 / 1024));
1170       fprintf (stderr, "}");
1171     }
1172 }
1173 
1174 /* This table provides a fast way to determine ceil(log_2(size)) for
1175    allocation requests.  The minimum allocation size is eight bytes.  */
1176 #define NUM_SIZE_LOOKUP 512
1177 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1178 {
1179   3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1180   4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1181   5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1182   6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1183   6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1184   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1185   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1186   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1187   7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1188   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1189   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1190   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1191   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1192   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1193   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1194   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1195   8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1196   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1197   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1198   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1199   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1200   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1201   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1202   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1203   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1204   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1205   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1206   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1207   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1208   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1209   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1210   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1211 };
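
/* With the static table above, a 24-byte request, for example, maps to
   order 5 (a 32-byte object); init_ggc later rewrites size_lookup so
   that such requests can use a closer-fitting extra order instead.  */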
1212 
1213 /* For a given size of memory requested for allocation, return the
1214    actual size that is going to be allocated, as well as the size
1215    order.  */
1216 
1217 static void
1218 ggc_round_alloc_size_1 (size_t requested_size,
1219 			size_t *size_order,
1220 			size_t *alloced_size)
1221 {
1222   size_t order, object_size;
1223 
1224   if (requested_size < NUM_SIZE_LOOKUP)
1225     {
1226       order = size_lookup[requested_size];
1227       object_size = OBJECT_SIZE (order);
1228     }
1229   else
1230     {
1231       order = 10;
1232       while (requested_size > (object_size = OBJECT_SIZE (order)))
1233         order++;
1234     }
1235 
1236   if (size_order)
1237     *size_order = order;
1238   if (alloced_size)
1239     *alloced_size = object_size;
1240 }
1241 
1242 /* For a given size of memory requested for allocation, return the
1243    actual size that is going to be allocated.  */
1244 
1245 size_t
1246 ggc_round_alloc_size (size_t requested_size)
1247 {
1248   size_t size = 0;
1249 
1250   ggc_round_alloc_size_1 (requested_size, NULL, &size);
1251   return size;
1252 }
1253 
1254 /* Push a finalizer onto the appropriate vec.  */
1255 
1256 static void
1257 add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
1258 {
1259   if (f == NULL)
1260     /* No finalizer.  */;
1261   else if (n == 1)
1262     {
1263       finalizer fin (result, f);
1264       G.finalizers[G.context_depth].safe_push (fin);
1265     }
1266   else
1267     {
1268       vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
1269       G.vec_finalizers[G.context_depth].safe_push (fin);
1270     }
1271 }
1272 
1273 /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
1274 
1275 void *
1276 ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
1277 		    MEM_STAT_DECL)
1278 {
1279   size_t order, word, bit, object_offset, object_size;
1280   struct page_entry *entry;
1281   void *result;
1282 
1283   ggc_round_alloc_size_1 (size, &order, &object_size);
1284 
1285   /* If there are non-full pages for this size allocation, they are at
1286      the head of the list.  */
1287   entry = G.pages[order];
1288 
1289   /* If there is no page for this object size, or all pages in this
1290      context are full, allocate a new page.  */
1291   if (entry == NULL || entry->num_free_objects == 0)
1292     {
1293       struct page_entry *new_entry;
1294       new_entry = alloc_page (order);
1295 
1296       new_entry->index_by_depth = G.by_depth_in_use;
1297       push_by_depth (new_entry, 0);
1298 
1299       /* We can skip context depths, if we do, make sure we go all the
1300 	 way to the new depth.  */
1301       while (new_entry->context_depth >= G.depth_in_use)
1302 	push_depth (G.by_depth_in_use-1);
1303 
1304       /* If this is the only entry, it's also the tail.  If it is not
1305 	 the only entry, then we must update the PREV pointer of the
1306 	 ENTRY (G.pages[order]) to point to our new page entry.  */
1307       if (entry == NULL)
1308 	G.page_tails[order] = new_entry;
1309       else
1310 	entry->prev = new_entry;
1311 
1312       /* Put new pages at the head of the page list.  By definition the
1313 	 entry at the head of the list always has a NULL PREV pointer.  */
1314       new_entry->next = entry;
1315       new_entry->prev = NULL;
1316       entry = new_entry;
1317       G.pages[order] = new_entry;
1318 
1319       /* For a new page, we know the word and bit positions (in the
1320 	 in_use bitmap) of the first available object -- they're zero.  */
1321       new_entry->next_bit_hint = 1;
1322       word = 0;
1323       bit = 0;
1324       object_offset = 0;
1325     }
1326   else
1327     {
1328       /* First try to use the hint left from the previous allocation
1329 	 to locate a clear bit in the in-use bitmap.  We've made sure
1330 	 that the one-past-the-end bit is always set, so if the hint
1331 	 has run over, this test will fail.  */
1332       unsigned hint = entry->next_bit_hint;
1333       word = hint / HOST_BITS_PER_LONG;
1334       bit = hint % HOST_BITS_PER_LONG;
1335 
1336       /* If the hint didn't work, scan the bitmap from the beginning.  */
1337       if ((entry->in_use_p[word] >> bit) & 1)
1338 	{
1339 	  word = bit = 0;
1340 	  while (~entry->in_use_p[word] == 0)
1341 	    ++word;
1342 
1343 #if GCC_VERSION >= 3004
1344 	  bit = __builtin_ctzl (~entry->in_use_p[word]);
1345 #else
1346 	  while ((entry->in_use_p[word] >> bit) & 1)
1347 	    ++bit;
1348 #endif
1349 
1350 	  hint = word * HOST_BITS_PER_LONG + bit;
1351 	}
1352 
1353       /* Next time, try the next bit.  */
1354       entry->next_bit_hint = hint + 1;
1355 
1356       object_offset = hint * object_size;
1357     }
1358 
1359   /* Set the in-use bit.  */
1360   entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1361 
1362   /* Keep a running total of the number of free objects.  If this page
1363      fills up, we may have to move it to the end of the list if the
1364      next page isn't full.  If the next page is full, all subsequent
1365      pages are full, so there's no need to move it.  */
1366   if (--entry->num_free_objects == 0
1367       && entry->next != NULL
1368       && entry->next->num_free_objects > 0)
1369     {
1370       /* We have a new head for the list.  */
1371       G.pages[order] = entry->next;
1372 
1373       /* We are moving ENTRY to the end of the page table list.
1374 	 The new page at the head of the list will have NULL in
1375 	 its PREV field and ENTRY will have NULL in its NEXT field.  */
1376       entry->next->prev = NULL;
1377       entry->next = NULL;
1378 
1379       /* Append ENTRY to the tail of the list.  */
1380       entry->prev = G.page_tails[order];
1381       G.page_tails[order]->next = entry;
1382       G.page_tails[order] = entry;
1383     }
1384 
1385   /* Calculate the object's address.  */
1386   result = entry->page + object_offset;
1387   if (GATHER_STATISTICS)
1388     ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1389 			 result FINAL_PASS_MEM_STAT);
1390 
1391 #ifdef ENABLE_GC_CHECKING
1392   /* Keep poisoning the object by writing 0xaf, so that we keep the
1393      exact same semantics in the presence of memory bugs, regardless of
1394      ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
1395      handle to avoid a handle leak.  */
1396   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1397 
1398   /* `Poison' the entire allocated object, including any padding at
1399      the end.  */
1400   memset (result, 0xaf, object_size);
1401 
1402   /* Make the bytes after the end of the object inaccessible.  Discard the
1403      handle to avoid handle leak.  */
1404   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1405 						object_size - size));
1406 #endif
1407 
1408   /* Tell Valgrind that the memory is there, but its content isn't
1409      defined.  The bytes at the end of the object are still marked
1410      inaccessible.  */
1411   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1412 
1413   /* Keep track of how many bytes are being allocated.  This
1414      information is used in deciding when to collect.  */
1415   G.allocated += object_size;
1416 
1417   /* For timevar statistics.  */
1418   timevar_ggc_mem_total += object_size;
1419 
1420   if (f)
1421     add_finalizer (result, f, s, n);
1422 
1423   if (GATHER_STATISTICS)
1424     {
1425       size_t overhead = object_size - size;
1426 
1427       G.stats.total_overhead += overhead;
1428       G.stats.total_allocated += object_size;
1429       G.stats.total_overhead_per_order[order] += overhead;
1430       G.stats.total_allocated_per_order[order] += object_size;
1431 
1432       if (size <= 32)
1433 	{
1434 	  G.stats.total_overhead_under32 += overhead;
1435 	  G.stats.total_allocated_under32 += object_size;
1436 	}
1437       if (size <= 64)
1438 	{
1439 	  G.stats.total_overhead_under64 += overhead;
1440 	  G.stats.total_allocated_under64 += object_size;
1441 	}
1442       if (size <= 128)
1443 	{
1444 	  G.stats.total_overhead_under128 += overhead;
1445 	  G.stats.total_allocated_under128 += object_size;
1446 	}
1447     }
1448 
1449   if (GGC_DEBUG_LEVEL >= 3)
1450     fprintf (G.debug_file,
1451 	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1452 	     (unsigned long) size, (unsigned long) object_size, result,
1453 	     (void *) entry);
1454 
1455   return result;
1456 }
1457 
1458 /* Mark function for strings.  */
1459 
1460 void
1461 gt_ggc_m_S (const void *p)
1462 {
1463   page_entry *entry;
1464   unsigned bit, word;
1465   unsigned long mask;
1466   unsigned long offset;
1467 
1468   if (!p)
1469     return;
1470 
1471   /* Look up the page on which the object is alloced.  If it was not
1472      GC allocated, gracefully bail out.  */
1473   entry = safe_lookup_page_table_entry (p);
1474   if (!entry)
1475     return;
1476 
1477   /* Calculate the index of the object on the page; this is its bit
1478      position in the in_use_p bitmap.  Note that because a char* might
1479      point to the middle of an object, we need special code here to
1480      make sure P points to the start of an object.  */
1481   offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1482   if (offset)
1483     {
1484       /* Here we've seen a char* which does not point to the beginning
1485 	 of an allocated object.  We assume it points to the middle of
1486 	 a STRING_CST.  */
1487       gcc_assert (offset == offsetof (struct tree_string, str));
1488       p = ((const char *) p) - offset;
1489       gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1490       return;
1491     }
1492 
1493   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1494   word = bit / HOST_BITS_PER_LONG;
1495   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1496 
1497   /* If the bit was previously set, skip it.  */
1498   if (entry->in_use_p[word] & mask)
1499     return;
1500 
1501   /* Otherwise set it, and decrement the free object count.  */
1502   entry->in_use_p[word] |= mask;
1503   entry->num_free_objects -= 1;
1504 
1505   if (GGC_DEBUG_LEVEL >= 4)
1506     fprintf (G.debug_file, "Marking %p\n", p);
1507 
1508   return;
1509 }
1510 
1511 
1512 /* User-callable entry points for marking string X.  */
1513 
1514 void
1515 gt_ggc_mx (const char *& x)
1516 {
1517   gt_ggc_m_S (x);
1518 }
1519 
1520 void
1521 gt_ggc_mx (unsigned char *& x)
1522 {
1523   gt_ggc_m_S (x);
1524 }
1525 
1526 void
1527 gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1528 {
1529 }
1530 
1531 /* If P is not marked, mark it and return false.  Otherwise return true.
1532    P must have been allocated by the GC allocator; it mustn't point to
1533    static objects, stack variables, or memory allocated with malloc.  */
1534 
1535 int
1536 ggc_set_mark (const void *p)
1537 {
1538   page_entry *entry;
1539   unsigned bit, word;
1540   unsigned long mask;
1541 
1542   /* Look up the page on which the object is alloced.  If the object
1543      wasn't allocated by the collector, we'll probably die.  */
1544   entry = lookup_page_table_entry (p);
1545   gcc_assert (entry);
1546 
1547   /* Calculate the index of the object on the page; this is its bit
1548      position in the in_use_p bitmap.  */
1549   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1550   word = bit / HOST_BITS_PER_LONG;
1551   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1552 
1553   /* If the bit was previously set, skip it.  */
1554   if (entry->in_use_p[word] & mask)
1555     return 1;
1556 
1557   /* Otherwise set it, and decrement the free object count.  */
1558   entry->in_use_p[word] |= mask;
1559   entry->num_free_objects -= 1;
1560 
1561   if (GGC_DEBUG_LEVEL >= 4)
1562     fprintf (G.debug_file, "Marking %p\n", p);
1563 
1564   return 0;
1565 }
1566 
1567 /* Return 1 if P has been marked, zero otherwise.
1568    P must have been allocated by the GC allocator; it mustn't point to
1569    static objects, stack variables, or memory allocated with malloc.  */
1570 
1571 int
1572 ggc_marked_p (const void *p)
1573 {
1574   page_entry *entry;
1575   unsigned bit, word;
1576   unsigned long mask;
1577 
1578   /* Look up the page on which the object is alloced.  If the object
1579      wasn't allocated by the collector, we'll probably die.  */
1580   entry = lookup_page_table_entry (p);
1581   gcc_assert (entry);
1582 
1583   /* Calculate the index of the object on the page; this is its bit
1584      position in the in_use_p bitmap.  */
1585   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1586   word = bit / HOST_BITS_PER_LONG;
1587   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1588 
1589   return (entry->in_use_p[word] & mask) != 0;
1590 }
1591 
1592 /* Return the size of the gc-able object P.  */
1593 
1594 size_t
1595 ggc_get_size (const void *p)
1596 {
1597   page_entry *pe = lookup_page_table_entry (p);
1598   return OBJECT_SIZE (pe->order);
1599 }
1600 
1601 /* Release the memory for object P.  */
1602 
1603 void
1604 ggc_free (void *p)
1605 {
1606   if (in_gc)
1607     return;
1608 
1609   page_entry *pe = lookup_page_table_entry (p);
1610   size_t order = pe->order;
1611   size_t size = OBJECT_SIZE (order);
1612 
1613   if (GATHER_STATISTICS)
1614     ggc_free_overhead (p);
1615 
1616   if (GGC_DEBUG_LEVEL >= 3)
1617     fprintf (G.debug_file,
1618 	     "Freeing object, actual size=%lu, at %p on %p\n",
1619 	     (unsigned long) size, p, (void *) pe);
1620 
1621 #ifdef ENABLE_GC_CHECKING
1622   /* Poison the data, to indicate the data is garbage.  */
1623   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1624   memset (p, 0xa5, size);
1625 #endif
1626   /* Let valgrind know the object is free.  */
1627   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1628 
1629 #ifdef ENABLE_GC_ALWAYS_COLLECT
1630   /* In the completely-anal-checking mode, we do *not* immediately free
1631      the data, but instead verify that the data is *actually* not
1632      reachable the next time we collect.  */
1633   {
1634     struct free_object *fo = XNEW (struct free_object);
1635     fo->object = p;
1636     fo->next = G.free_object_list;
1637     G.free_object_list = fo;
1638   }
1639 #else
1640   {
1641     unsigned int bit_offset, word, bit;
1642 
1643     G.allocated -= size;
1644 
1645     /* Mark the object not-in-use.  */
1646     bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1647     word = bit_offset / HOST_BITS_PER_LONG;
1648     bit = bit_offset % HOST_BITS_PER_LONG;
1649     pe->in_use_p[word] &= ~(1UL << bit);
1650 
1651     if (pe->num_free_objects++ == 0)
1652       {
1653 	page_entry *p, *q;
1654 
1655 	/* If the page is completely full, then it's supposed to
1656 	   be after all pages that aren't.  Since we've freed one
1657 	   object from a page that was full, we need to move the
1658 	   page to the head of the list.
1659 
1660 	   PE is the node we want to move.  Q is the previous node
1661 	   and P is the next node in the list.  */
1662 	q = pe->prev;
1663 	if (q && q->num_free_objects == 0)
1664 	  {
1665 	    p = pe->next;
1666 
1667 	    q->next = p;
1668 
1669 	    /* If PE was at the end of the list, then Q becomes the
1670 	       new end of the list.  If PE was not the end of the
1671 	       list, then we need to update the PREV field for P.  */
1672 	    if (!p)
1673 	      G.page_tails[order] = q;
1674 	    else
1675 	      p->prev = q;
1676 
1677 	    /* Move PE to the head of the list.  */
1678 	    pe->next = G.pages[order];
1679 	    pe->prev = NULL;
1680 	    G.pages[order]->prev = pe;
1681 	    G.pages[order] = pe;
1682 	  }
1683 
1684 	/* Reset the hint bit to point to the only free object.  */
1685 	pe->next_bit_hint = bit_offset;
1686       }
1687   }
1688 #endif
1689 }
1690 
1691 /* Subroutine of init_ggc which computes the pair of numbers used to
1692    perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1693 
1694    This algorithm is taken from Granlund and Montgomery's paper
1695    "Division by Invariant Integers using Multiplication"
1696    (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1697    constants).  */
1698 
1699 static void
1700 compute_inverse (unsigned order)
1701 {
1702   size_t size, inv;
1703   unsigned int e;
1704 
1705   size = OBJECT_SIZE (order);
1706   e = 0;
1707   while (size % 2 == 0)
1708     {
1709       e++;
1710       size >>= 1;
1711     }
1712 
1713   inv = size;
1714   while (inv * size != 1)
1715     inv = inv * (2 - inv*size);
1716 
1717   DIV_MULT (order) = inv;
1718   DIV_SHIFT (order) = e;
1719 }
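/* A worked example (illustrative only): for a hypothetical 24-byte order,
   the loop above strips e = 3 factors of two, leaving the odd part 3.
   The Newton iteration then converges to the inverse of 3 modulo 2^N
   (0xAAAAAAAAAAAAAAAB for N == 64), so OFFSET_TO_BIT can divide any
   exact multiple of 24 with one multiply and one shift; byte offset 96
   maps to bit 96 / 24 == 4.  The helper below, with hypothetical names,
   restates the same iteration stand-alone.  */
#if 0
static size_t
example_exact_division_inverse (size_t odd_size)
{
  /* Each Newton-Raphson step doubles the number of correct low-order
     bits of the inverse of ODD_SIZE modulo 2^N, where N is the width
     of size_t; ODD_SIZE must be odd.  */
  size_t inv = odd_size;
  while (inv * odd_size != 1)
    inv = inv * (2 - inv * odd_size);
  return inv;
}
#endif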
1720 
1721 /* Initialize the ggc-mmap allocator.  */
1722 void
1723 init_ggc (void)
1724 {
1725   static bool init_p = false;
1726   unsigned order;
1727 
1728   if (init_p)
1729     return;
1730   init_p = true;
1731 
1732   G.pagesize = getpagesize ();
1733   G.lg_pagesize = exact_log2 (G.pagesize);
1734 
1735 #ifdef HAVE_MMAP_DEV_ZERO
1736   G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1737   if (G.dev_zero_fd == -1)
1738     internal_error ("open /dev/zero: %m");
1739 #endif
1740 
1741 #if 0
1742   G.debug_file = fopen ("ggc-mmap.debug", "w");
1743 #else
1744   G.debug_file = stdout;
1745 #endif
1746 
1747 #ifdef USING_MMAP
1748   /* StunOS has an amazing off-by-one error for the first mmap allocation
1749      after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1750      believe, is an unaligned page allocation, which would cause us to
1751      hork badly if we tried to use it.  */
1752   {
1753     char *p = alloc_anon (NULL, G.pagesize, true);
1754     struct page_entry *e;
1755     if ((uintptr_t)p & (G.pagesize - 1))
1756       {
1757 	/* How losing.  Discard this one and try another.  If we still
1758 	   can't get something useful, give up.  */
1759 
1760 	p = alloc_anon (NULL, G.pagesize, true);
1761 	gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1762       }
1763 
1764     /* We have a good page, might as well hold onto it...  */
1765     e = XCNEW (struct page_entry);
1766     e->bytes = G.pagesize;
1767     e->page = p;
1768     e->next = G.free_pages;
1769     G.free_pages = e;
1770   }
1771 #endif
1772 
1773   /* Initialize the object size table.  */
1774   for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1775     object_size_table[order] = (size_t) 1 << order;
1776   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1777     {
1778       size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1779 
1780       /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1781 	 so that we're sure of getting aligned memory.  */
1782       s = ROUND_UP (s, MAX_ALIGNMENT);
1783       object_size_table[order] = s;
1784     }
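  /* For illustration on a 64-bit host: at this point object_size_table
     holds 1, 2, 4, ..., 2^63 for the first HOST_BITS_PER_PTR orders,
     followed by the special sizes from extra_order_size_table, each
     rounded up to a multiple of MAX_ALIGNMENT.  */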
1785 
1786   /* Initialize the objects-per-page and inverse tables.  */
1787   for (order = 0; order < NUM_ORDERS; ++order)
1788     {
1789       objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1790       if (objects_per_page_table[order] == 0)
1791 	objects_per_page_table[order] = 1;
1792       compute_inverse (order);
1793     }
1794 
1795   /* Reset the size_lookup array to put appropriately sized objects in
1796      the special orders.  All objects bigger than the previous power
1797      of two, but no greater than the special size, should go in the
1798      new order.  */
1799   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1800     {
1801       int o;
1802       int i;
1803 
1804       i = OBJECT_SIZE (order);
1805       if (i >= NUM_SIZE_LOOKUP)
1806 	continue;
1807 
1808       for (o = size_lookup[i]; o == size_lookup [i]; --i)
1809 	size_lookup[i] = order;
1810     }
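  /* For example (hypothetical special size): if an extra order held
     72-byte objects, the loop above would retarget size_lookup[65]
     through size_lookup[72] from the 128-byte power-of-two order to
     the 72-byte order, while requests of 73..128 bytes stay on the
     128-byte order.  */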
1811 
1812   G.depth_in_use = 0;
1813   G.depth_max = 10;
1814   G.depth = XNEWVEC (unsigned int, G.depth_max);
1815 
1816   G.by_depth_in_use = 0;
1817   G.by_depth_max = INITIAL_PTE_COUNT;
1818   G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1819   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1820 
1821   /* Allocate space for the depth 0 finalizers.  */
1822   G.finalizers.safe_push (vNULL);
1823   G.vec_finalizers.safe_push (vNULL);
1824   gcc_assert (G.finalizers.length() == 1);
1825 }
1826 
1827 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1828    reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
1829 
1830 static void
1831 ggc_recalculate_in_use_p (page_entry *p)
1832 {
1833   unsigned int i;
1834   size_t num_objects;
1835 
1836   /* Because the past-the-end bit in in_use_p is always set, we
1837      pretend there is one additional object.  */
1838   num_objects = OBJECTS_IN_PAGE (p) + 1;
1839 
1840   /* Reset the free object count.  */
1841   p->num_free_objects = num_objects;
1842 
1843   /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
1844   for (i = 0;
1845        i < CEIL (BITMAP_SIZE (num_objects),
1846 		 sizeof (*p->in_use_p));
1847        ++i)
1848     {
1849       unsigned long j;
1850 
1851       /* Something is in use if it is marked, or if it was in use in a
1852 	 context further down the context stack.  */
1853       p->in_use_p[i] |= save_in_use_p (p)[i];
1854 
1855       /* Decrement the free object count for every object allocated.  */
1856       for (j = p->in_use_p[i]; j; j >>= 1)
1857 	p->num_free_objects -= (j & 1);
1858     }
1859 
1860   gcc_assert (p->num_free_objects < num_objects);
1861 }
1862 
1863 /* Unmark all objects.  */
1864 
1865 static void
1866 clear_marks (void)
1867 {
1868   unsigned order;
1869 
1870   for (order = 2; order < NUM_ORDERS; order++)
1871     {
1872       page_entry *p;
1873 
1874       for (p = G.pages[order]; p != NULL; p = p->next)
1875 	{
1876 	  size_t num_objects = OBJECTS_IN_PAGE (p);
1877 	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1878 
1879 	  /* The data should be page-aligned.  */
1880 	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1881 
1882 	  /* Pages that aren't in the topmost context are not collected;
1883 	     nevertheless, we need their in-use bit vectors to store GC
1884 	     marks.  So, back them up first.  */
1885 	  if (p->context_depth < G.context_depth)
1886 	    {
1887 	      if (! save_in_use_p (p))
1888 		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1889 	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1890 	    }
1891 
1892 	  /* Reset the number of free objects and clear the
1893              in-use bits.  These will be adjusted by mark_obj.  */
1894 	  p->num_free_objects = num_objects;
1895 	  memset (p->in_use_p, 0, bitmap_size);
1896 
1897 	  /* Make sure the one-past-the-end bit is always set.  */
1898 	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1899 	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1900 	}
1901     }
1902 }
1903 
1904 /* Check if any blocks with a registered finalizer have become unmarked.  If
1905    so, run the finalizer and unregister it because the block is about to be
1906    freed.  Note that no guarantee is made about what order finalizers will run
1907    in, so touching other objects in gc memory is extremely unwise.  */
1908 
1909 static void
1910 ggc_handle_finalizers ()
1911 {
1912   unsigned dlen = G.finalizers.length();
1913   for (unsigned d = G.context_depth; d < dlen; ++d)
1914     {
1915       vec<finalizer> &v = G.finalizers[d];
1916       unsigned length = v.length ();
1917       for (unsigned int i = 0; i < length;)
1918 	{
1919 	  finalizer &f = v[i];
1920 	  if (!ggc_marked_p (f.addr ()))
1921 	    {
1922 	      f.call ();
1923 	      v.unordered_remove (i);
1924 	      length--;
1925 	    }
1926 	  else
1927 	    i++;
1928 	}
1929     }
1930 
1931   gcc_assert (dlen == G.vec_finalizers.length());
1932   for (unsigned d = G.context_depth; d < dlen; ++d)
1933     {
1934       vec<vec_finalizer> &vv = G.vec_finalizers[d];
1935       unsigned length = vv.length ();
1936       for (unsigned int i = 0; i < length;)
1937 	{
1938 	  vec_finalizer &f = vv[i];
1939 	  if (!ggc_marked_p (f.addr ()))
1940 	    {
1941 	      f.call ();
1942 	      vv.unordered_remove (i);
1943 	      length--;
1944 	    }
1945 	  else
1946 	    i++;
1947 	}
1948     }
1949 }
1950 
1951 /* Free all empty pages.  Partially empty pages need no attention
1952    because the `mark' bit doubles as an `unused' bit.  */
1953 
1954 static void
1955 sweep_pages (void)
1956 {
1957   unsigned order;
1958 
1959   for (order = 2; order < NUM_ORDERS; order++)
1960     {
1961       /* The last page-entry to consider, regardless of entries
1962 	 placed at the end of the list.  */
1963       page_entry * const last = G.page_tails[order];
1964 
1965       size_t num_objects;
1966       size_t live_objects;
1967       page_entry *p, *previous;
1968       int done;
1969 
1970       p = G.pages[order];
1971       if (p == NULL)
1972 	continue;
1973 
1974       previous = NULL;
1975       do
1976 	{
1977 	  page_entry *next = p->next;
1978 
1979 	  /* Loop until all entries have been examined.  */
1980 	  done = (p == last);
1981 
1982 	  num_objects = OBJECTS_IN_PAGE (p);
1983 
1984 	  /* Add all live objects on this page to the count of
1985              allocated memory.  */
1986 	  live_objects = num_objects - p->num_free_objects;
1987 
1988 	  G.allocated += OBJECT_SIZE (order) * live_objects;
1989 
1990 	  /* Only objects on pages in the topmost context should get
1991 	     collected.  */
1992 	  if (p->context_depth < G.context_depth)
1993 	    ;
1994 
1995 	  /* Remove the page if it's empty.  */
1996 	  else if (live_objects == 0)
1997 	    {
1998 	      /* If P was the first page in the list, then NEXT
1999 		 becomes the new first page in the list, otherwise
2000 		 splice P out of the forward pointers.  */
2001 	      if (! previous)
2002 		G.pages[order] = next;
2003 	      else
2004 		previous->next = next;
2005 
2006 	      /* Splice P out of the back pointers too.  */
2007 	      if (next)
2008 		next->prev = previous;
2009 
2010 	      /* Are we removing the last element?  */
2011 	      if (p == G.page_tails[order])
2012 		G.page_tails[order] = previous;
2013 	      free_page (p);
2014 	      p = previous;
2015 	    }
2016 
2017 	  /* If the page is full, move it to the end.  */
2018 	  else if (p->num_free_objects == 0)
2019 	    {
2020 	      /* Don't move it if it's already at the end.  */
2021 	      if (p != G.page_tails[order])
2022 		{
2023 		  /* Move p to the end of the list.  */
2024 		  p->next = NULL;
2025 		  p->prev = G.page_tails[order];
2026 		  G.page_tails[order]->next = p;
2027 
2028 		  /* Update the tail pointer...  */
2029 		  G.page_tails[order] = p;
2030 
2031 		  /* ... and the head pointer, if necessary.  */
2032 		  if (! previous)
2033 		    G.pages[order] = next;
2034 		  else
2035 		    previous->next = next;
2036 
2037 		  /* And update the backpointer in NEXT if necessary.  */
2038 		  if (next)
2039 		    next->prev = previous;
2040 
2041 		  p = previous;
2042 		}
2043 	    }
2044 
2045 	  /* If we've fallen through to here, it's a page in the
2046 	     topmost context that is neither full nor empty.  Such a
2047 	     page must precede pages at lesser context depth in the
2048 	     list, so move it to the head.  */
2049 	  else if (p != G.pages[order])
2050 	    {
2051 	      previous->next = p->next;
2052 
2053 	      /* Update the backchain in the next node if it exists.  */
2054 	      if (p->next)
2055 		p->next->prev = previous;
2056 
2057 	      /* Move P to the head of the list.  */
2058 	      p->next = G.pages[order];
2059 	      p->prev = NULL;
2060 	      G.pages[order]->prev = p;
2061 
2062 	      /* Update the head pointer.  */
2063 	      G.pages[order] = p;
2064 
2065 	      /* Are we moving the last element?  */
2066 	      if (G.page_tails[order] == p)
2067 	        G.page_tails[order] = previous;
2068 	      p = previous;
2069 	    }
2070 
2071 	  previous = p;
2072 	  p = next;
2073 	}
2074       while (! done);
2075 
2076       /* Now, restore the in_use_p vectors for any pages from contexts
2077          other than the current one.  */
2078       for (p = G.pages[order]; p; p = p->next)
2079 	if (p->context_depth != G.context_depth)
2080 	  ggc_recalculate_in_use_p (p);
2081     }
2082 }
2083 
2084 #ifdef ENABLE_GC_CHECKING
2085 /* Clobber all free objects.  */
2086 
2087 static void
2088 poison_pages (void)
2089 {
2090   unsigned order;
2091 
2092   for (order = 2; order < NUM_ORDERS; order++)
2093     {
2094       size_t size = OBJECT_SIZE (order);
2095       page_entry *p;
2096 
2097       for (p = G.pages[order]; p != NULL; p = p->next)
2098 	{
2099 	  size_t num_objects;
2100 	  size_t i;
2101 
2102 	  if (p->context_depth != G.context_depth)
2103 	    /* Since we don't do any collection for pages in pushed
2104 	       contexts, there's no need to do any poisoning.  And
2105 	       besides, the IN_USE_P array isn't valid until we pop
2106 	       contexts.  */
2107 	    continue;
2108 
2109 	  num_objects = OBJECTS_IN_PAGE (p);
2110 	  for (i = 0; i < num_objects; i++)
2111 	    {
2112 	      size_t word, bit;
2113 	      word = i / HOST_BITS_PER_LONG;
2114 	      bit = i % HOST_BITS_PER_LONG;
2115 	      if (((p->in_use_p[word] >> bit) & 1) == 0)
2116 		{
2117 		  char *object = p->page + i * size;
2118 
2119 		  /* Keep poison-by-write when we expect to use Valgrind,
2120 		     so the exact same memory semantics is kept, in case
2121 		     there are memory errors.  We override this request
2122 		     below.  */
2123 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2124 								 size));
2125 		  memset (object, 0xa5, size);
2126 
2127 		  /* Drop the handle to avoid handle leak.  */
2128 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2129 		}
2130 	    }
2131 	}
2132     }
2133 }
2134 #else
2135 #define poison_pages()
2136 #endif
2137 
2138 #ifdef ENABLE_GC_ALWAYS_COLLECT
2139 /* Validate that the reportedly free objects actually are.  */
2140 
2141 static void
2142 validate_free_objects (void)
2143 {
2144   struct free_object *f, *next, *still_free = NULL;
2145 
2146   for (f = G.free_object_list; f ; f = next)
2147     {
2148       page_entry *pe = lookup_page_table_entry (f->object);
2149       size_t bit, word;
2150 
2151       bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2152       word = bit / HOST_BITS_PER_LONG;
2153       bit = bit % HOST_BITS_PER_LONG;
2154       next = f->next;
2155 
2156       /* Make certain it isn't visible from any root.  Notice that we
2157 	 do this check before sweep_pages merges save_in_use_p.  */
2158       gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2159 
2160       /* If the object comes from an outer context, then retain the
2161 	 free_object entry, so that we can verify that the address
2162 	 isn't live on the stack in some outer context.  */
2163       if (pe->context_depth != G.context_depth)
2164 	{
2165 	  f->next = still_free;
2166 	  still_free = f;
2167 	}
2168       else
2169 	free (f);
2170     }
2171 
2172   G.free_object_list = still_free;
2173 }
2174 #else
2175 #define validate_free_objects()
2176 #endif
2177 
2178 /* Top level mark-and-sweep routine.  */
2179 
2180 void
2181 ggc_collect (void)
2182 {
2183   /* Avoid frequent unnecessary work by skipping collection if the
2184      total allocations haven't expanded much since the last
2185      collection.  */
2186   float allocated_last_gc =
2187     MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * 1024);
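  /* Illustrative numbers only: with a 4 MB floor and a 30% minimum
     expansion, a previous live size of 8 MB means this function returns
     without collecting until G.allocated exceeds roughly 10.4 MB.  */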
2188 
2189   /* It is also a good time to trim the memory block pool back to its limits.  */
2190   memory_block_pool::trim ();
2191 
2192   float min_expand = allocated_last_gc * param_ggc_min_expand / 100;
2193   if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2194     return;
2195 
2196   timevar_push (TV_GC);
2197   if (GGC_DEBUG_LEVEL >= 2)
2198     fprintf (G.debug_file, "BEGIN COLLECTING\n");
2199 
2200   /* Zero the total allocated bytes.  This will be recalculated in the
2201      sweep phase.  */
2202   size_t allocated = G.allocated;
2203   G.allocated = 0;
2204 
2205   /* Release the pages we freed the last time we collected, but didn't
2206      reuse in the interim.  */
2207   release_pages ();
2208 
2209   /* Output this later so we do not interfere with release_pages.  */
2210   if (!quiet_flag)
2211     fprintf (stderr, " {GC %luk -> ", (unsigned long) allocated / 1024);
2212 
2213   /* Indicate that we've seen collections at this context depth.  */
2214   G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2215 
2216   invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2217 
2218   in_gc = true;
2219   clear_marks ();
2220   ggc_mark_roots ();
2221   ggc_handle_finalizers ();
2222 
2223   if (GATHER_STATISTICS)
2224     ggc_prune_overhead_list ();
2225 
2226   poison_pages ();
2227   validate_free_objects ();
2228   sweep_pages ();
2229 
2230   in_gc = false;
2231   G.allocated_last_gc = G.allocated;
2232 
2233   invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2234 
2235   timevar_pop (TV_GC);
2236 
2237   if (!quiet_flag)
2238     fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2239   if (GGC_DEBUG_LEVEL >= 2)
2240     fprintf (G.debug_file, "END COLLECTING\n");
2241 }
2242 
2243 /* Return free pages to the system.  */
2244 
2245 void
2246 ggc_trim ()
2247 {
2248   timevar_push (TV_GC);
2249   G.allocated = 0;
2250   sweep_pages ();
2251   release_pages ();
2252   if (!quiet_flag)
2253     fprintf (stderr, " {GC trimmed to %luk, %luk mapped}",
2254 	     (unsigned long) G.allocated / 1024,
2255 	     (unsigned long) G.bytes_mapped / 1024);
2256   timevar_pop (TV_GC);
2257 }
2258 
2259 /* Assume that all GGC memory is reachable and grow the limits for the next
2260    collection.  With checking, trigger GGC so -Q compilation outputs how much
2261    memory really is reachable.  */
2262 
2263 void
2264 ggc_grow (void)
2265 {
2266   if (!flag_checking)
2267     G.allocated_last_gc = MAX (G.allocated_last_gc,
2268 			       G.allocated);
2269   else
2270     ggc_collect ();
2271   if (!quiet_flag)
2272     fprintf (stderr, " {GC %luk} ", (unsigned long) G.allocated / 1024);
2273 }
2274 
2275 void
2276 ggc_print_statistics (void)
2277 {
2278   struct ggc_statistics stats;
2279   unsigned int i;
2280   size_t total_overhead = 0;
2281 
2282   /* Clear the statistics.  */
2283   memset (&stats, 0, sizeof (stats));
2284 
2285   /* Make sure collection will really occur.  */
2286   G.allocated_last_gc = 0;
2287 
2288   /* Collect and print the statistics common across collectors.  */
2289   ggc_print_common_statistics (stderr, &stats);
2290 
2291   /* Release free pages so that we will not count the bytes allocated
2292      there as part of the total allocated memory.  */
2293   release_pages ();
2294 
2295   /* Collect some information about the various sizes of
2296      allocation.  */
2297   fprintf (stderr,
2298            "Memory still allocated at the end of the compilation process\n");
2299   fprintf (stderr, "%-8s %10s  %10s  %10s\n",
2300 	   "Size", "Allocated", "Used", "Overhead");
2301   for (i = 0; i < NUM_ORDERS; ++i)
2302     {
2303       page_entry *p;
2304       size_t allocated;
2305       size_t in_use;
2306       size_t overhead;
2307 
2308       /* Skip empty entries.  */
2309       if (!G.pages[i])
2310 	continue;
2311 
2312       overhead = allocated = in_use = 0;
2313 
2314       /* Figure out the total number of bytes allocated for objects of
2315 	 this size, and how many of them are actually in use.  Also figure
2316 	 out how much memory the page table is using.  */
2317       for (p = G.pages[i]; p; p = p->next)
2318 	{
2319 	  allocated += p->bytes;
2320 	  in_use +=
2321 	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2322 
2323 	  overhead += (sizeof (page_entry) - sizeof (long)
2324 		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2325 	}
2326       fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " "
2327 	       PRsa (10) "\n",
2328 	       (uint64_t)OBJECT_SIZE (i),
2329 	       SIZE_AMOUNT (allocated),
2330 	       SIZE_AMOUNT (in_use),
2331 	       SIZE_AMOUNT (overhead));
2332       total_overhead += overhead;
2333     }
2334   fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n",
2335 	   "Total",
2336 	   SIZE_AMOUNT (G.bytes_mapped),
2337 	   SIZE_AMOUNT (G.allocated),
2338 	   SIZE_AMOUNT (total_overhead));
2339 
2340   if (GATHER_STATISTICS)
2341     {
2342       fprintf (stderr, "\nTotal allocations and overheads during "
2343 	       "the compilation process\n");
2344 
2345       fprintf (stderr, "Total Overhead:                          "
2346 	       PRsa (9) "\n",
2347 	       SIZE_AMOUNT (G.stats.total_overhead));
2348       fprintf (stderr, "Total Allocated:                         "
2349 	       PRsa (9) "\n",
2350 	       SIZE_AMOUNT (G.stats.total_allocated));
2351 
2352       fprintf (stderr, "Total Overhead  under  32B:              "
2353 	       PRsa (9) "\n",
2354 	       SIZE_AMOUNT (G.stats.total_overhead_under32));
2355       fprintf (stderr, "Total Allocated under  32B:              "
2356 	       PRsa (9) "\n",
2357 	       SIZE_AMOUNT (G.stats.total_allocated_under32));
2358       fprintf (stderr, "Total Overhead  under  64B:              "
2359 	       PRsa (9) "\n",
2360 	       SIZE_AMOUNT (G.stats.total_overhead_under64));
2361       fprintf (stderr, "Total Allocated under  64B:              "
2362 	       PRsa (9) "\n",
2363 	       SIZE_AMOUNT (G.stats.total_allocated_under64));
2364       fprintf (stderr, "Total Overhead  under 128B:              "
2365 	       PRsa (9) "\n",
2366 	       SIZE_AMOUNT (G.stats.total_overhead_under128));
2367       fprintf (stderr, "Total Allocated under 128B:              "
2368 	       PRsa (9) "\n",
2369 	       SIZE_AMOUNT (G.stats.total_allocated_under128));
2370 
2371       for (i = 0; i < NUM_ORDERS; i++)
2372 	if (G.stats.total_allocated_per_order[i])
2373 	  {
2374 	    fprintf (stderr, "Total Overhead  page size %9" PRIu64 ":     "
2375 		     PRsa (9) "\n",
2376 		     (uint64_t)OBJECT_SIZE (i),
2377 		     SIZE_AMOUNT (G.stats.total_overhead_per_order[i]));
2378 	    fprintf (stderr, "Total Allocated page size %9" PRIu64 ":     "
2379 		     PRsa (9) "\n",
2380 		     (uint64_t)OBJECT_SIZE (i),
2381 		     SIZE_AMOUNT (G.stats.total_allocated_per_order[i]));
2382 	  }
2383     }
2384 }
2385 
2386 struct ggc_pch_ondisk
2387 {
2388   unsigned totals[NUM_ORDERS];
2389 };
2390 
2391 struct ggc_pch_data
2392 {
2393   struct ggc_pch_ondisk d;
2394   uintptr_t base[NUM_ORDERS];
2395   size_t written[NUM_ORDERS];
2396 };
2397 
2398 struct ggc_pch_data *
2399 init_ggc_pch (void)
2400 {
2401   return XCNEW (struct ggc_pch_data);
2402 }
2403 
2404 void
2405 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2406 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2407 {
2408   unsigned order;
2409 
2410   if (size < NUM_SIZE_LOOKUP)
2411     order = size_lookup[size];
2412   else
2413     {
2414       order = 10;
2415       while (size > OBJECT_SIZE (order))
2416 	order++;
2417     }
2418 
2419   d->d.totals[order]++;
2420 }
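/* A minimal sketch (hypothetical helper) of the order selection shared by
   ggc_pch_count_object, ggc_pch_alloc_object and ggc_pch_write_object:
   small requests consult the size_lookup table, larger ones search the
   order table upward.  For instance, a 40-byte object normally lands in
   the 64-byte order unless a closer special order exists.  */
#if 0
static unsigned
example_pch_order_for_size (size_t size)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      /* Mirror the upward search used in the functions above.  */
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }
  return order;
}
#endif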
2421 
2422 size_t
2423 ggc_pch_total_size (struct ggc_pch_data *d)
2424 {
2425   size_t a = 0;
2426   unsigned i;
2427 
2428   for (i = 0; i < NUM_ORDERS; i++)
2429     a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2430   return a;
2431 }
2432 
2433 void
2434 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2435 {
2436   uintptr_t a = (uintptr_t) base;
2437   unsigned i;
2438 
2439   for (i = 0; i < NUM_ORDERS; i++)
2440     {
2441       d->base[i] = a;
2442       a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2443     }
2444 }
2445 
2446 
2447 char *
2448 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2449 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2450 {
2451   unsigned order;
2452   char *result;
2453 
2454   if (size < NUM_SIZE_LOOKUP)
2455     order = size_lookup[size];
2456   else
2457     {
2458       order = 10;
2459       while (size > OBJECT_SIZE (order))
2460 	order++;
2461     }
2462 
2463   result = (char *) d->base[order];
2464   d->base[order] += OBJECT_SIZE (order);
2465   return result;
2466 }
2467 
2468 void
2469 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2470 		       FILE *f ATTRIBUTE_UNUSED)
2471 {
2472   /* Nothing to do.  */
2473 }
2474 
2475 void
2476 ggc_pch_write_object (struct ggc_pch_data *d,
2477 		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2478 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2479 {
2480   unsigned order;
2481   static const char emptyBytes[256] = { 0 };
2482 
2483   if (size < NUM_SIZE_LOOKUP)
2484     order = size_lookup[size];
2485   else
2486     {
2487       order = 10;
2488       while (size > OBJECT_SIZE (order))
2489 	order++;
2490     }
2491 
2492   if (fwrite (x, size, 1, f) != 1)
2493     fatal_error (input_location, "cannot write PCH file: %m");
2494 
2495   /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2496      object out to OBJECT_SIZE(order).  This happens for strings.  */
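  /* For instance (illustrative sizes): a 22-byte string written into the
     32-byte order is followed by 10 bytes of zero padding, so the next
     object in the PCH image starts on an OBJECT_SIZE boundary.  */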
2497 
2498   if (size != OBJECT_SIZE (order))
2499     {
2500       unsigned padding = OBJECT_SIZE (order) - size;
2501 
2502       /* To speed small writes, we use a nulled-out array that's larger
2503          than most padding requests as the source for our null bytes.  This
2504          permits us to do the padding with fwrite() rather than fseek(), and
2505          limits the chance the OS may try to flush any outstanding writes.  */
2506       if (padding <= sizeof (emptyBytes))
2507         {
2508           if (fwrite (emptyBytes, 1, padding, f) != padding)
2509 	    fatal_error (input_location, "cannot write PCH file");
2510         }
2511       else
2512         {
2513           /* Larger than our buffer?  Just default to fseek.  */
2514           if (fseek (f, padding, SEEK_CUR) != 0)
2515 	    fatal_error (input_location, "cannot write PCH file");
2516         }
2517     }
2518 
2519   d->written[order]++;
2520   if (d->written[order] == d->d.totals[order]
2521       && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2522 				   G.pagesize),
2523 		SEEK_CUR) != 0)
2524     fatal_error (input_location, "cannot write PCH file: %m");
2525 }
2526 
2527 void
2528 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2529 {
2530   if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2531     fatal_error (input_location, "cannot write PCH file: %m");
2532   free (d);
2533 }
2534 
2535 /* Move the PCH PTE entries just added to the end of by_depth, to the
2536    front.  */
2537 
2538 static void
2539 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2540 {
2541   /* First, we swap the new entries to the front of the varrays.  */
2542   page_entry **new_by_depth;
2543   unsigned long **new_save_in_use;
2544 
2545   new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2546   new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2547 
2548   memcpy (&new_by_depth[0],
2549 	  &G.by_depth[count_old_page_tables],
2550 	  count_new_page_tables * sizeof (void *));
2551   memcpy (&new_by_depth[count_new_page_tables],
2552 	  &G.by_depth[0],
2553 	  count_old_page_tables * sizeof (void *));
2554   memcpy (&new_save_in_use[0],
2555 	  &G.save_in_use[count_old_page_tables],
2556 	  count_new_page_tables * sizeof (void *));
2557   memcpy (&new_save_in_use[count_new_page_tables],
2558 	  &G.save_in_use[0],
2559 	  count_old_page_tables * sizeof (void *));
2560 
2561   free (G.by_depth);
2562   free (G.save_in_use);
2563 
2564   G.by_depth = new_by_depth;
2565   G.save_in_use = new_save_in_use;
2566 
2567   /* Now update all the index_by_depth fields.  */
2568   for (unsigned i = G.by_depth_in_use; i--;)
2569     {
2570       page_entry *p = G.by_depth[i];
2571       p->index_by_depth = i;
2572     }
2573 
2574   /* And last, we update the depth pointers in G.depth.  The first
2575      entry is already 0, and context 0 entries always start at index
2576      0, so there is nothing to update in the first slot.  We need a
2577      second slot only if we have old ptes; if we do, they start
2578      at index count_new_page_tables.  */
2579   if (count_old_page_tables)
2580     push_depth (count_new_page_tables);
2581 }
2582 
2583 void
2584 ggc_pch_read (FILE *f, void *addr)
2585 {
2586   struct ggc_pch_ondisk d;
2587   unsigned i;
2588   char *offs = (char *) addr;
2589   unsigned long count_old_page_tables;
2590   unsigned long count_new_page_tables;
2591 
2592   count_old_page_tables = G.by_depth_in_use;
2593 
2594   if (fread (&d, sizeof (d), 1, f) != 1)
2595     fatal_error (input_location, "cannot read PCH file: %m");
2596 
2597   /* We've just read in a PCH file.  So, every object that used to be
2598      allocated is now free.  */
2599   clear_marks ();
2600 #ifdef ENABLE_GC_CHECKING
2601   poison_pages ();
2602 #endif
2603   /* Since we free all the allocated objects, the free list becomes
2604      useless.  Validate it now, which will also clear it.  */
2605   validate_free_objects ();
2606 
2607   /* No object read from a PCH file should ever be freed.  So, set the
2608      context depth to 1, and set the depth of all the currently-allocated
2609      pages to be 1 too.  PCH pages will have depth 0.  */
2610   gcc_assert (!G.context_depth);
2611   G.context_depth = 1;
2612   /* Allocate space for the depth 1 finalizers.  */
2613   G.finalizers.safe_push (vNULL);
2614   G.vec_finalizers.safe_push (vNULL);
2615   gcc_assert (G.finalizers.length() == 2);
2616   for (i = 0; i < NUM_ORDERS; i++)
2617     {
2618       page_entry *p;
2619       for (p = G.pages[i]; p != NULL; p = p->next)
2620 	p->context_depth = G.context_depth;
2621     }
2622 
2623   /* Allocate the appropriate page-table entries for the pages read from
2624      the PCH file.  */
2625 
2626   for (i = 0; i < NUM_ORDERS; i++)
2627     {
2628       struct page_entry *entry;
2629       char *pte;
2630       size_t bytes;
2631       size_t num_objs;
2632       size_t j;
2633 
2634       if (d.totals[i] == 0)
2635 	continue;
2636 
2637       bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2638       num_objs = bytes / OBJECT_SIZE (i);
2639       entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2640 					    - sizeof (long)
2641 					    + BITMAP_SIZE (num_objs + 1)));
2642       entry->bytes = bytes;
2643       entry->page = offs;
2644       entry->context_depth = 0;
2645       offs += bytes;
2646       entry->num_free_objects = 0;
2647       entry->order = i;
2648 
2649       for (j = 0;
2650 	   j + HOST_BITS_PER_LONG <= num_objs + 1;
2651 	   j += HOST_BITS_PER_LONG)
2652 	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2653       for (; j < num_objs + 1; j++)
2654 	entry->in_use_p[j / HOST_BITS_PER_LONG]
2655 	  |= 1L << (j % HOST_BITS_PER_LONG);
2656 
2657       for (pte = entry->page;
2658 	   pte < entry->page + entry->bytes;
2659 	   pte += G.pagesize)
2660 	set_page_table_entry (pte, entry);
2661 
2662       if (G.page_tails[i] != NULL)
2663 	G.page_tails[i]->next = entry;
2664       else
2665 	G.pages[i] = entry;
2666       G.page_tails[i] = entry;
2667 
2668       /* We start off by just adding all the new information to the
2669 	 end of the varrays; later, we will move the new information
2670 	 to the front of the varrays, as the PCH page tables are at
2671 	 context 0.  */
2672       push_by_depth (entry, 0);
2673     }
2674 
2675   /* Now, we update the various data structures that speed page table
2676      handling.  */
2677   count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2678 
2679   move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2680 
2681   /* Update the statistics.  */
2682   G.allocated = G.allocated_last_gc = offs - (char *)addr;
2683 }
2684