1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2    Copyright (C) 1999-2016 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "alias.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "diagnostic-core.h"
29 #include "flags.h"
30 #include "ggc-internal.h"
31 #include "timevar.h"
32 #include "params.h"
33 #include "cgraph.h"
34 #include "cfgloop.h"
35 #include "plugin.h"
36 
37 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
38    file open.  Prefer either to valloc.  */
39 #ifdef HAVE_MMAP_ANON
40 # undef HAVE_MMAP_DEV_ZERO
41 # define USING_MMAP
42 #endif
43 
44 #ifdef HAVE_MMAP_DEV_ZERO
45 # define USING_MMAP
46 #endif
47 
48 #ifndef USING_MMAP
49 #define USING_MALLOC_PAGE_GROUPS
50 #endif
51 
52 #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
53     && defined(USING_MMAP)
54 # define USING_MADVISE
55 #endif
56 
57 /* Strategy:
58 
59    This garbage-collecting allocator allocates objects on one of a set
60    of pages.  Each page can allocate objects of a single size only;
61    available sizes are powers of two starting at four bytes.  The size
62    of an allocation request is rounded up to the next power of two
63    (`order'), and satisfied from the appropriate page.
64 
65    Each page is recorded in a page-entry, which also maintains an
66    in-use bitmap of object positions on the page.  This allows the
67    allocation state of a particular object to be flipped without
68    touching the page itself.
69 
70    Each page-entry also has a context depth, which is used to track
71    pushing and popping of allocation contexts.  Only objects allocated
72    in the current (highest-numbered) context may be collected.
73 
74    Page entries are arranged in an array of singly-linked lists.  The
75    array is indexed by the allocation size, in bits, of the pages on
76    it; i.e. all pages on a list allocate objects of the same size.
77    Pages are ordered on the list such that all non-full pages precede
78    all full pages, with non-full pages arranged in order of decreasing
79    context depth.
80 
81    Empty pages (of all orders) are kept on a single page cache list,
82    and are considered first when new pages are required; they are
83    deallocated at the start of the next collection if they haven't
84    been recycled by then.  */
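
/* Illustrative sketch only (not part of the allocator): assuming a 4K
   page size and ignoring the extra non-power-of-two orders defined
   below, a 20-byte request is rounded up to 32 bytes (order 5) and is
   served from a page holding 4096 / 32 == 128 objects.  Since non-full
   pages are kept at the head of their list, allocation reduces to:

     order  = 5;
     page   = G.pages[order];
     bit    = first_clear_bit (page->in_use_p);
     result = page->page + bit * OBJECT_SIZE (order);

   where first_clear_bit is a hypothetical stand-in for the
   hint-plus-scan logic in ggc_internal_alloc below.  */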
85 
86 /* Define GGC_DEBUG_LEVEL to print debugging information.
87      0: No debugging output.
88      1: GC statistics only.
89      2: Page-entry allocations/deallocations as well.
90      3: Object allocations as well.
91      4: Object marks as well.  */
92 #define GGC_DEBUG_LEVEL (0)
93 
94 /* A two-level tree is used to look up the page-entry for a given
95    pointer.  Two chunks of the pointer's bits are extracted to index
96    the first and second levels of the tree, as follows:
97 
98 				   HOST_PAGE_SIZE_BITS
99 			   32		|      |
100        msb +----------------+----+------+------+ lsb
101 			    |    |      |
102 			 PAGE_L1_BITS   |
103 				 |      |
104 			       PAGE_L2_BITS
105 
106    The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
107    pages are aligned on system page boundaries.  The next most
108    significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
109    index values in the lookup table, respectively.
110 
111    For 32-bit architectures and the settings below, there are no
112    leftover bits.  For architectures with wider pointers, the lookup
113    tree points to a list of pages, which must be scanned to find the
114    correct one.  */
115 
116 #define PAGE_L1_BITS	(8)
117 #define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
118 #define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
119 #define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)
120 
121 #define LOOKUP_L1(p) \
122   (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
123 
124 #define LOOKUP_L2(p) \
125   (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
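
/* Worked example (illustration only): on a 32-bit host with 4K pages,
   G.lg_pagesize == 12 and PAGE_L2_BITS == 12, so for p == 0x12345678

     LOOKUP_L1 (p) == (0x12345678 >> 24) & 0xff  == 0x12
     LOOKUP_L2 (p) == (0x12345678 >> 12) & 0xfff == 0x345

   and the low 12 bits (0x678) are the offset within the page, which
   the lookup ignores.  */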
126 
127 /* The number of objects per allocation page, for objects on a page of
128    the indicated ORDER.  */
129 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
130 
131 /* The number of objects in P.  */
132 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
133 
134 /* The size of an object on a page of the indicated ORDER.  */
135 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
136 
137 /* For speed, we avoid doing a general integer divide to locate the
138    offset in the allocation bitmap, by precalculating numbers M, S
139    such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
140    within the page which is evenly divisible by the object size Z.  */
141 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
142 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
143 #define OFFSET_TO_BIT(OFFSET, ORDER) \
144   (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
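
/* Worked example (illustration only, assuming MAX_ALIGNMENT == 8 so
   that a 24-byte extra order exists, and a 32-bit size_t): for
   Z == 24 == 3 * 2^3, the multiplicative inverse of 3 modulo 2^32 is
   0xaaaaaaab, so DIV_MULT is 0xaaaaaaab and DIV_SHIFT is 3.  For the
   offset O == 48,

     (48 * 0xaaaaaaab) mod 2^32 == 16,  and  16 >> 3 == 2 == 48 / 24,

   i.e. OFFSET_TO_BIT yields the object index without a divide.  */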
145 
146 /* We use this structure to determine the alignment required for
147    allocations.  For power-of-two sized allocations, that's not a
148    problem, but it does matter for odd-sized allocations.
149    We do not care about alignment for floating-point types.  */
150 
151 struct max_alignment {
152   char c;
153   union {
154     int64_t i;
155     void *p;
156   } u;
157 };
158 
159 /* The biggest alignment required.  */
160 
161 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
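
/* For example, on a typical LP64 host both int64_t and void * require
   8-byte alignment, so the union starts at offset 8 and MAX_ALIGNMENT
   is 8.  (Several illustrative comments below assume this value; it is
   host-dependent.)  */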
162 
163 
164 /* The number of extra orders, not corresponding to power-of-two sized
165    objects.  */
166 
167 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
168 
169 #define RTL_SIZE(NSLOTS) \
170   (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
171 
172 #define TREE_EXP_SIZE(OPS) \
173   (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
174 
175 /* The Ith entry is the maximum size of an object to be stored in the
176    Ith extra order.  Adding a new entry to this array is the *only*
177    thing you need to do to add a new special allocation size.  */
178 
179 static const size_t extra_order_size_table[] = {
180   /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
181      There are a lot of structures with these sizes and explicitly
182      listing them risks orders being dropped because they changed size.  */
183   MAX_ALIGNMENT * 3,
184   MAX_ALIGNMENT * 5,
185   MAX_ALIGNMENT * 6,
186   MAX_ALIGNMENT * 7,
187   MAX_ALIGNMENT * 9,
188   MAX_ALIGNMENT * 10,
189   MAX_ALIGNMENT * 11,
190   MAX_ALIGNMENT * 12,
191   MAX_ALIGNMENT * 13,
192   MAX_ALIGNMENT * 14,
193   MAX_ALIGNMENT * 15,
194   sizeof (struct tree_decl_non_common),
195   sizeof (struct tree_field_decl),
196   sizeof (struct tree_parm_decl),
197   sizeof (struct tree_var_decl),
198   sizeof (struct tree_type_non_common),
199   sizeof (struct function),
200   sizeof (struct basic_block_def),
201   sizeof (struct cgraph_node),
202   sizeof (struct loop),
203 };
204 
205 /* The total number of orders.  */
206 
207 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
208 
209 /* Compute the smallest nonnegative number which when added to X gives
210    a multiple of F.  */
211 
212 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
213 
214 /* Round X to the next multiple of the page size.  */
215 
216 #define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
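
/* Worked example (illustration only): ROUND_UP_VALUE (10, 8) is
   7 - ((7 + 10) % 8) == 6, and 10 + 6 == 16 is the next multiple of 8.
   Assuming a 4K page size, PAGE_ALIGN (5000) is 8192.  */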
217 
218 /* The Ith entry is the number of objects on a page of order I.  */
219 
220 static unsigned objects_per_page_table[NUM_ORDERS];
221 
222 /* The Ith entry is the size of an object on a page of order I.  */
223 
224 static size_t object_size_table[NUM_ORDERS];
225 
226 /* The Ith entry is a pair of numbers (mult, shift) such that
227    ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
228    for all k evenly divisible by OBJECT_SIZE(I).  */
229 
230 static struct
231 {
232   size_t mult;
233   unsigned int shift;
234 }
235 inverse_table[NUM_ORDERS];
236 
237 /* A page_entry records the status of an allocation page.  This
238    structure is dynamically sized to fit the bitmap in_use_p.  */
239 struct page_entry
240 {
241   /* The next page-entry with objects of the same size, or NULL if
242      this is the last page-entry.  */
243   struct page_entry *next;
244 
245   /* The previous page-entry with objects of the same size, or NULL if
246      this is the first page-entry.   The PREV pointer exists solely to
247      keep the cost of ggc_free manageable.  */
248   struct page_entry *prev;
249 
250   /* The number of bytes allocated.  (This will always be a multiple
251      of the host system page size.)  */
252   size_t bytes;
253 
254   /* The address at which the memory is allocated.  */
255   char *page;
256 
257 #ifdef USING_MALLOC_PAGE_GROUPS
258   /* Back pointer to the page group this page came from.  */
259   struct page_group *group;
260 #endif
261 
262   /* This is the index in the by_depth varray where this page table
263      can be found.  */
264   unsigned long index_by_depth;
265 
266   /* Context depth of this page.  */
267   unsigned short context_depth;
268 
269   /* The number of free objects remaining on this page.  */
270   unsigned short num_free_objects;
271 
272   /* A likely candidate for the bit position of a free object for the
273      next allocation from this page.  */
274   unsigned short next_bit_hint;
275 
276   /* The lg of size of objects allocated from this page.  */
277   unsigned char order;
278 
279   /* Discarded page? */
280   bool discarded;
281 
282   /* A bit vector indicating whether or not objects are in use.  The
283      Nth bit is one if the Nth object on this page is allocated.  This
284      array is dynamically sized.  */
285   unsigned long in_use_p[1];
286 };
287 
288 #ifdef USING_MALLOC_PAGE_GROUPS
289 /* A page_group describes a large allocation from malloc, from which
290    we parcel out aligned pages.  */
291 struct page_group
292 {
293   /* A linked list of all extant page groups.  */
294   struct page_group *next;
295 
296   /* The address we received from malloc.  */
297   char *allocation;
298 
299   /* The size of the block.  */
300   size_t alloc_size;
301 
302   /* A bitmask of pages in use.  */
303   unsigned int in_use;
304 };
305 #endif
306 
307 #if HOST_BITS_PER_PTR <= 32
308 
309 /* On 32-bit hosts, we use a two level page table, as pictured above.  */
310 typedef page_entry **page_table[PAGE_L1_SIZE];
311 
312 #else
313 
314 /* On 64-bit hosts, we use the same two level page tables plus a linked
315    list that disambiguates the top 32-bits.  There will almost always be
316    exactly one entry in the list.  */
317 typedef struct page_table_chain
318 {
319   struct page_table_chain *next;
320   size_t high_bits;
321   page_entry **table[PAGE_L1_SIZE];
322 } *page_table;
323 
324 #endif
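
/* For illustration: on a 64-bit host the chain is keyed by the upper 32
   bits of the pointer, e.g. p == 0x00007f1234567890 matches the link
   whose high_bits == 0x00007f1200000000; the low 32 bits are then
   looked up through that link's two-level table exactly as on a 32-bit
   host.  */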
325 
326 class finalizer
327 {
328 public:
329   finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
330 
331   void *addr () const { return m_addr; }
332 
333   void call () const { m_function (m_addr); }
334 
335 private:
336   void *m_addr;
337   void (*m_function)(void *);
338 };
339 
340 class vec_finalizer
341 {
342 public:
343   vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
344     m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
345 
346   void call () const
347     {
348       for (size_t i = 0; i < m_n_objects; i++)
349 	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
350     }
351 
352   void *addr () const { return reinterpret_cast<void *> (m_addr); }
353 
354 private:
355   uintptr_t m_addr;
356   void (*m_function)(void *);
357   size_t m_object_size;
358   size_t m_n_objects;
359 };
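
/* Illustrative sketch only: a finalizer wraps a single object, while a
   vec_finalizer covers N contiguous objects of size S.  For instance,
   with hypothetical BASE, DTOR and ELT_SIZE,

     vec_finalizer fin ((uintptr_t) BASE, DTOR, ELT_SIZE, 4);
     fin.call ();

   runs DTOR on BASE + 0, BASE + ELT_SIZE, BASE + 2 * ELT_SIZE and
   BASE + 3 * ELT_SIZE.  */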
360 
361 #ifdef ENABLE_GC_ALWAYS_COLLECT
362 /* List of free objects to be verified as actually free on the
363    next collection.  */
364 struct free_object
365 {
366   void *object;
367   struct free_object *next;
368 };
369 #endif
370 
371 /* The rest of the global variables.  */
372 static struct ggc_globals
373 {
374   /* The Nth element in this array is a page with objects of size 2^N.
375      If there are any pages with free objects, they will be at the
376      head of the list.  NULL if there are no page-entries for this
377      object size.  */
378   page_entry *pages[NUM_ORDERS];
379 
380   /* The Nth element in this array is the last page with objects of
381      size 2^N.  NULL if there are no page-entries for this object
382      size.  */
383   page_entry *page_tails[NUM_ORDERS];
384 
385   /* Lookup table for associating allocation pages with object addresses.  */
386   page_table lookup;
387 
388   /* The system's page size.  */
389   size_t pagesize;
390   size_t lg_pagesize;
391 
392   /* Bytes currently allocated.  */
393   size_t allocated;
394 
395   /* Bytes currently allocated at the end of the last collection.  */
396   size_t allocated_last_gc;
397 
398   /* Total amount of memory mapped.  */
399   size_t bytes_mapped;
400 
401   /* Bit N set if any allocations have been done at context depth N.  */
402   unsigned long context_depth_allocations;
403 
404   /* Bit N set if any collections have been done at context depth N.  */
405   unsigned long context_depth_collections;
406 
407   /* The current depth in the context stack.  */
408   unsigned short context_depth;
409 
410   /* A file descriptor open to /dev/zero for reading.  */
411 #if defined (HAVE_MMAP_DEV_ZERO)
412   int dev_zero_fd;
413 #endif
414 
415   /* A cache of free system pages.  */
416   page_entry *free_pages;
417 
418 #ifdef USING_MALLOC_PAGE_GROUPS
419   page_group *page_groups;
420 #endif
421 
422   /* The file descriptor for debugging output.  */
423   FILE *debug_file;
424 
425   /* Current number of elements in use in depth below.  */
426   unsigned int depth_in_use;
427 
428   /* Maximum number of elements that can be used before resizing.  */
429   unsigned int depth_max;
430 
431   /* Each element of this array is an index in by_depth where the given
432      depth starts.  This array is indexed by the depth we are
433      interested in.  */
434   unsigned int *depth;
435 
436   /* Current number of elements in use in by_depth below.  */
437   unsigned int by_depth_in_use;
438 
439   /* Maximum number of elements that can be used before resizing.  */
440   unsigned int by_depth_max;
441 
442   /* Each element of this array is a pointer to a page_entry; all
443      page_entries can be found in here, ordered by increasing depth.
444      index_by_depth in the page_entry is the index into this data
445      structure where that page_entry can be found.  This is used to
446      speed up finding all page_entries at a particular depth.  */
447   page_entry **by_depth;
448 
449   /* Each element is a pointer to the saved in_use_p bits, if any,
450      zero otherwise.  We allocate them all together, to enable a
451      better runtime data access pattern.  */
452   unsigned long **save_in_use;
453 
454   /* Finalizers for single objects.  The first index is collection_depth.  */
455   vec<vec<finalizer> > finalizers;
456 
457   /* Finalizers for vectors of objects.  */
458   vec<vec<vec_finalizer> > vec_finalizers;
459 
460 #ifdef ENABLE_GC_ALWAYS_COLLECT
461   /* List of free objects to be verified as actually free on the
462      next collection.  */
463   struct free_object *free_object_list;
464 #endif
465 
466   struct
467   {
468     /* Total GC-allocated memory.  */
469     unsigned long long total_allocated;
470     /* Total overhead for GC-allocated memory.  */
471     unsigned long long total_overhead;
472 
473     /* Total allocations and overhead for sizes less than 32, 64 and 128.
474        These sizes are interesting because they are typical cache line
475        sizes.  */
476 
477     unsigned long long total_allocated_under32;
478     unsigned long long total_overhead_under32;
479 
480     unsigned long long total_allocated_under64;
481     unsigned long long total_overhead_under64;
482 
483     unsigned long long total_allocated_under128;
484     unsigned long long total_overhead_under128;
485 
486     /* The allocations for each of the allocation orders.  */
487     unsigned long long total_allocated_per_order[NUM_ORDERS];
488 
489     /* The overhead for each of the allocation orders.  */
490     unsigned long long total_overhead_per_order[NUM_ORDERS];
491   } stats;
492 } G;
493 
494 /* True if a gc is currently taking place.  */
495 
496 static bool in_gc = false;
497 
498 /* The size in bytes required to maintain a bitmap for the objects
499    on a page-entry.  */
500 #define BITMAP_SIZE(Num_objects) \
501   (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
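
/* For example, assuming HOST_BITS_PER_LONG == 64 and an 8-byte long,
   a page of 128 objects needs BITMAP_SIZE (128 + 1)
   == CEIL (129, 64) * 8 == 24 bytes; the extra bit is the
   one-past-the-end sentinel set in alloc_page below.  */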
502 
503 /* Allocate pages in chunks of this size, to throttle calls to memory
504    allocation routines.  The first page is used, the rest go onto the
505    free list.  This cannot be larger than HOST_BITS_PER_INT for the
506    in_use bitmask for page_group.  Hosts that need a different value
507    can override this by defining GGC_QUIRE_SIZE explicitly.  */
508 #ifndef GGC_QUIRE_SIZE
509 # ifdef USING_MMAP
510 #  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
511 # else
512 #  define GGC_QUIRE_SIZE 16
513 # endif
514 #endif
515 
516 /* Initial guess as to how many page table entries we might need.  */
517 #define INITIAL_PTE_COUNT 128
518 
519 static int ggc_allocated_p (const void *);
520 static page_entry *lookup_page_table_entry (const void *);
521 static void set_page_table_entry (void *, page_entry *);
522 #ifdef USING_MMAP
523 static char *alloc_anon (char *, size_t, bool check);
524 #endif
525 #ifdef USING_MALLOC_PAGE_GROUPS
526 static size_t page_group_index (char *, char *);
527 static void set_page_group_in_use (page_group *, char *);
528 static void clear_page_group_in_use (page_group *, char *);
529 #endif
530 static struct page_entry * alloc_page (unsigned);
531 static void free_page (struct page_entry *);
532 static void release_pages (void);
533 static void clear_marks (void);
534 static void sweep_pages (void);
535 static void ggc_recalculate_in_use_p (page_entry *);
536 static void compute_inverse (unsigned);
537 static inline void adjust_depth (void);
538 static void move_ptes_to_front (int, int);
539 
540 void debug_print_page_list (int);
541 static void push_depth (unsigned int);
542 static void push_by_depth (page_entry *, unsigned long *);
543 
544 /* Push an entry onto G.depth.  */
545 
546 inline static void
547 push_depth (unsigned int i)
548 {
549   if (G.depth_in_use >= G.depth_max)
550     {
551       G.depth_max *= 2;
552       G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
553     }
554   G.depth[G.depth_in_use++] = i;
555 }
556 
557 /* Push an entry onto G.by_depth and G.save_in_use.  */
558 
559 inline static void
560 push_by_depth (page_entry *p, unsigned long *s)
561 {
562   if (G.by_depth_in_use >= G.by_depth_max)
563     {
564       G.by_depth_max *= 2;
565       G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
566       G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
567 				  G.by_depth_max);
568     }
569   G.by_depth[G.by_depth_in_use] = p;
570   G.save_in_use[G.by_depth_in_use++] = s;
571 }
572 
573 #if (GCC_VERSION < 3001)
574 #define prefetch(X) ((void) X)
575 #else
576 #define prefetch(X) __builtin_prefetch (X)
577 #endif
578 
579 #define save_in_use_p_i(__i) \
580   (G.save_in_use[__i])
581 #define save_in_use_p(__p) \
582   (save_in_use_p_i (__p->index_by_depth))
583 
584 /* Returns nonzero if P was allocated in GC'able memory.  */
585 
586 static inline int
587 ggc_allocated_p (const void *p)
588 {
589   page_entry ***base;
590   size_t L1, L2;
591 
592 #if HOST_BITS_PER_PTR <= 32
593   base = &G.lookup[0];
594 #else
595   page_table table = G.lookup;
596   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
597   while (1)
598     {
599       if (table == NULL)
600 	return 0;
601       if (table->high_bits == high_bits)
602 	break;
603       table = table->next;
604     }
605   base = &table->table[0];
606 #endif
607 
608   /* Extract the level 1 and 2 indices.  */
609   L1 = LOOKUP_L1 (p);
610   L2 = LOOKUP_L2 (p);
611 
612   return base[L1] && base[L1][L2];
613 }
614 
615 /* Traverse the page table and find the entry for a page.
616    Die (probably) if the object wasn't allocated via GC.  */
617 
618 static inline page_entry *
619 lookup_page_table_entry (const void *p)
620 {
621   page_entry ***base;
622   size_t L1, L2;
623 
624 #if HOST_BITS_PER_PTR <= 32
625   base = &G.lookup[0];
626 #else
627   page_table table = G.lookup;
628   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
629   while (table->high_bits != high_bits)
630     table = table->next;
631   base = &table->table[0];
632 #endif
633 
634   /* Extract the level 1 and 2 indices.  */
635   L1 = LOOKUP_L1 (p);
636   L2 = LOOKUP_L2 (p);
637 
638   return base[L1][L2];
639 }
640 
641 /* Set the page table entry for a page.  */
642 
643 static void
644 set_page_table_entry (void *p, page_entry *entry)
645 {
646   page_entry ***base;
647   size_t L1, L2;
648 
649 #if HOST_BITS_PER_PTR <= 32
650   base = &G.lookup[0];
651 #else
652   page_table table;
653   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
654   for (table = G.lookup; table; table = table->next)
655     if (table->high_bits == high_bits)
656       goto found;
657 
658   /* Not found -- allocate a new table.  */
659   table = XCNEW (struct page_table_chain);
660   table->next = G.lookup;
661   table->high_bits = high_bits;
662   G.lookup = table;
663 found:
664   base = &table->table[0];
665 #endif
666 
667   /* Extract the level 1 and 2 indices.  */
668   L1 = LOOKUP_L1 (p);
669   L2 = LOOKUP_L2 (p);
670 
671   if (base[L1] == NULL)
672     base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
673 
674   base[L1][L2] = entry;
675 }
676 
677 /* Prints the page-entry for object size ORDER, for debugging.  */
678 
679 DEBUG_FUNCTION void
680 debug_print_page_list (int order)
681 {
682   page_entry *p;
683   printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
684 	  (void *) G.page_tails[order]);
685   p = G.pages[order];
686   while (p != NULL)
687     {
688       printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
689 	      p->num_free_objects);
690       p = p->next;
691     }
692   printf ("NULL\n");
693   fflush (stdout);
694 }
695 
696 #ifdef USING_MMAP
697 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
698    (if non-null).  The ifdef structure here is intended to cause a
699    compile error unless exactly one of the HAVE_* is defined.  */
700 
701 static inline char *
702 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
703 {
704 #ifdef HAVE_MMAP_ANON
705   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
706 			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
707 #endif
708 #ifdef HAVE_MMAP_DEV_ZERO
709   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
710 			      MAP_PRIVATE, G.dev_zero_fd, 0);
711 #endif
712 
713   if (page == (char *) MAP_FAILED)
714     {
715       if (!check)
716         return NULL;
717       perror ("virtual memory exhausted");
718       exit (FATAL_EXIT_CODE);
719     }
720 
721   /* Remember that we allocated this memory.  */
722   G.bytes_mapped += size;
723 
724   /* Pretend we don't have access to the allocated pages.  We'll enable
725      access to smaller pieces of the area in ggc_internal_alloc.  Discard the
726      handle to avoid handle leak.  */
727   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
728 
729   return page;
730 }
731 #endif
732 #ifdef USING_MALLOC_PAGE_GROUPS
733 /* Compute the index for this page into the page group.  */
734 
735 static inline size_t
736 page_group_index (char *allocation, char *page)
737 {
738   return (size_t) (page - allocation) >> G.lg_pagesize;
739 }
740 
741 /* Set and clear the in_use bit for this page in the page group.  */
742 
743 static inline void
744 set_page_group_in_use (page_group *group, char *page)
745 {
746   group->in_use |= 1 << page_group_index (group->allocation, page);
747 }
748 
749 static inline void
750 clear_page_group_in_use (page_group *group, char *page)
751 {
752   group->in_use &= ~(1 << page_group_index (group->allocation, page));
753 }
754 #endif
755 
756 /* Allocate a new page for allocating objects of size 2^ORDER,
757    and return an entry for it.  The entry is not added to the
758    appropriate page_table list.  */
759 
760 static inline struct page_entry *
761 alloc_page (unsigned order)
762 {
763   struct page_entry *entry, *p, **pp;
764   char *page;
765   size_t num_objects;
766   size_t bitmap_size;
767   size_t page_entry_size;
768   size_t entry_size;
769 #ifdef USING_MALLOC_PAGE_GROUPS
770   page_group *group;
771 #endif
772 
773   num_objects = OBJECTS_PER_PAGE (order);
774   bitmap_size = BITMAP_SIZE (num_objects + 1);
775   page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
776   entry_size = num_objects * OBJECT_SIZE (order);
777   if (entry_size < G.pagesize)
778     entry_size = G.pagesize;
779   entry_size = PAGE_ALIGN (entry_size);
780 
781   entry = NULL;
782   page = NULL;
783 
784   /* Check the list of free pages for one we can use.  */
785   for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
786     if (p->bytes == entry_size)
787       break;
788 
789   if (p != NULL)
790     {
791       if (p->discarded)
792         G.bytes_mapped += p->bytes;
793       p->discarded = false;
794 
795       /* Recycle the allocated memory from this page ...  */
796       *pp = p->next;
797       page = p->page;
798 
799 #ifdef USING_MALLOC_PAGE_GROUPS
800       group = p->group;
801 #endif
802 
803       /* ... and, if possible, the page entry itself.  */
804       if (p->order == order)
805 	{
806 	  entry = p;
807 	  memset (entry, 0, page_entry_size);
808 	}
809       else
810 	free (p);
811     }
812 #ifdef USING_MMAP
813   else if (entry_size == G.pagesize)
814     {
815       /* We want just one page.  Allocate a bunch of them and put the
816 	 extras on the freelist.  (Can only do this optimization with
817 	 mmap for backing store.)  */
818       struct page_entry *e, *f = G.free_pages;
819       int i, entries = GGC_QUIRE_SIZE;
820 
821       page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
822       if (page == NULL)
823      	{
824 	  page = alloc_anon (NULL, G.pagesize, true);
825           entries = 1;
826 	}
827 
828       /* This loop counts down so that the chain will be in ascending
829 	 memory order.  */
830       for (i = entries - 1; i >= 1; i--)
831 	{
832 	  e = XCNEWVAR (struct page_entry, page_entry_size);
833 	  e->order = order;
834 	  e->bytes = G.pagesize;
835 	  e->page = page + (i << G.lg_pagesize);
836 	  e->next = f;
837 	  f = e;
838 	}
839 
840       G.free_pages = f;
841     }
842   else
843     page = alloc_anon (NULL, entry_size, true);
844 #endif
845 #ifdef USING_MALLOC_PAGE_GROUPS
846   else
847     {
848       /* Allocate a large block of memory and serve out the aligned
849 	 pages therein.  This results in much less memory wastage
850 	 than the traditional implementation of valloc.  */
851 
852       char *allocation, *a, *enda;
853       size_t alloc_size, head_slop, tail_slop;
854       int multiple_pages = (entry_size == G.pagesize);
855 
856       if (multiple_pages)
857 	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
858       else
859 	alloc_size = entry_size + G.pagesize - 1;
860       allocation = XNEWVEC (char, alloc_size);
861 
862       page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
863       head_slop = page - allocation;
864       if (multiple_pages)
865 	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
866       else
867 	tail_slop = alloc_size - entry_size - head_slop;
868       enda = allocation + alloc_size - tail_slop;
869 
870       /* We allocated N pages, which are likely not aligned, leaving
871 	 us with N-1 usable pages.  We plan to place the page_group
872 	 structure somewhere in the slop.  */
873       if (head_slop >= sizeof (page_group))
874 	group = (page_group *)page - 1;
875       else
876 	{
877 	  /* We magically got an aligned allocation.  Too bad, we have
878 	     to waste a page anyway.  */
879 	  if (tail_slop == 0)
880 	    {
881 	      enda -= G.pagesize;
882 	      tail_slop += G.pagesize;
883 	    }
884 	  gcc_assert (tail_slop >= sizeof (page_group));
885 	  group = (page_group *)enda;
886 	  tail_slop -= sizeof (page_group);
887 	}
888 
889       /* Remember that we allocated this memory.  */
890       group->next = G.page_groups;
891       group->allocation = allocation;
892       group->alloc_size = alloc_size;
893       group->in_use = 0;
894       G.page_groups = group;
895       G.bytes_mapped += alloc_size;
896 
897       /* If we allocated multiple pages, put the rest on the free list.  */
898       if (multiple_pages)
899 	{
900 	  struct page_entry *e, *f = G.free_pages;
901 	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
902 	    {
903 	      e = XCNEWVAR (struct page_entry, page_entry_size);
904 	      e->order = order;
905 	      e->bytes = G.pagesize;
906 	      e->page = a;
907 	      e->group = group;
908 	      e->next = f;
909 	      f = e;
910 	    }
911 	  G.free_pages = f;
912 	}
913     }
914 #endif
915 
916   if (entry == NULL)
917     entry = XCNEWVAR (struct page_entry, page_entry_size);
918 
919   entry->bytes = entry_size;
920   entry->page = page;
921   entry->context_depth = G.context_depth;
922   entry->order = order;
923   entry->num_free_objects = num_objects;
924   entry->next_bit_hint = 1;
925 
926   G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
927 
928 #ifdef USING_MALLOC_PAGE_GROUPS
929   entry->group = group;
930   set_page_group_in_use (group, page);
931 #endif
932 
933   /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
934      increment the hint.  */
935   entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
936     = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
937 
938   set_page_table_entry (page, entry);
939 
940   if (GGC_DEBUG_LEVEL >= 2)
941     fprintf (G.debug_file,
942 	     "Allocating page at %p, object size=%lu, data %p-%p\n",
943 	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
944 	     page + entry_size - 1);
945 
946   return entry;
947 }
948 
949 /* Adjust the size of G.depth so that no index greater than the one
950    used by the top of the G.by_depth is used.  */
951 
952 static inline void
953 adjust_depth (void)
954 {
955   page_entry *top;
956 
957   if (G.by_depth_in_use)
958     {
959       top = G.by_depth[G.by_depth_in_use-1];
960 
961       /* Peel back indices in depth that index into by_depth, so that
962 	 as new elements are added to by_depth, we note the indices
963 	 of those elements, if they are for new context depths.  */
964       while (G.depth_in_use > (size_t)top->context_depth+1)
965 	--G.depth_in_use;
966     }
967 }
968 
969 /* For a page that is no longer needed, put it on the free page list.  */
970 
971 static void
972 free_page (page_entry *entry)
973 {
974   if (GGC_DEBUG_LEVEL >= 2)
975     fprintf (G.debug_file,
976 	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
977 	     entry->page, entry->page + entry->bytes - 1);
978 
979   /* Mark the page as inaccessible.  Discard the handle to avoid handle
980      leak.  */
981   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
982 
983   set_page_table_entry (entry->page, NULL);
984 
985 #ifdef USING_MALLOC_PAGE_GROUPS
986   clear_page_group_in_use (entry->group, entry->page);
987 #endif
988 
989   if (G.by_depth_in_use > 1)
990     {
991       page_entry *top = G.by_depth[G.by_depth_in_use-1];
992       int i = entry->index_by_depth;
993 
994       /* We cannot free a page from a context deeper than the current
995 	 one.  */
996       gcc_assert (entry->context_depth == top->context_depth);
997 
998       /* Put top element into freed slot.  */
999       G.by_depth[i] = top;
1000       G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
1001       top->index_by_depth = i;
1002     }
1003   --G.by_depth_in_use;
1004 
1005   adjust_depth ();
1006 
1007   entry->next = G.free_pages;
1008   G.free_pages = entry;
1009 }
1010 
1011 /* Release the free page cache to the system.  */
1012 
1013 static void
1014 release_pages (void)
1015 {
1016 #ifdef USING_MADVISE
1017   page_entry *p, *start_p;
1018   char *start;
1019   size_t len;
1020   size_t mapped_len;
1021   page_entry *next, *prev, *newprev;
1022   size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
1023 
1024   /* First free larger continuous areas to the OS.
1025      This allows other allocators to grab these areas if needed.
1026      This is only done on larger chunks to avoid fragmentation.
1027      This does not always work because the free_pages list is only
1028      approximately sorted. */
1029 
1030   p = G.free_pages;
1031   prev = NULL;
1032   while (p)
1033     {
1034       start = p->page;
1035       start_p = p;
1036       len = 0;
1037       mapped_len = 0;
1038       newprev = prev;
1039       while (p && p->page == start + len)
1040         {
1041           len += p->bytes;
1042 	  if (!p->discarded)
1043 	      mapped_len += p->bytes;
1044 	  newprev = p;
1045           p = p->next;
1046         }
1047       if (len >= free_unit)
1048         {
1049           while (start_p != p)
1050             {
1051               next = start_p->next;
1052               free (start_p);
1053               start_p = next;
1054             }
1055           munmap (start, len);
1056 	  if (prev)
1057 	    prev->next = p;
1058           else
1059             G.free_pages = p;
1060           G.bytes_mapped -= mapped_len;
1061 	  continue;
1062         }
1063       prev = newprev;
1064    }
1065 
1066   /* Now give back the fragmented pages to the OS, but keep the address
1067      space to reuse it next time. */
1068 
1069   for (p = G.free_pages; p; )
1070     {
1071       if (p->discarded)
1072         {
1073           p = p->next;
1074           continue;
1075         }
1076       start = p->page;
1077       len = p->bytes;
1078       start_p = p;
1079       p = p->next;
1080       while (p && p->page == start + len)
1081         {
1082           len += p->bytes;
1083           p = p->next;
1084         }
1085       /* Give the page back to the kernel, but don't free the mapping.
1086          This avoids fragmentation in the virtual memory map of the
1087  	 process. Next time we can reuse it by just touching it. */
1088       madvise (start, len, MADV_DONTNEED);
1089       /* Don't count those pages as mapped, so as not to perturb the
1090          garbage collector unnecessarily. */
1091       G.bytes_mapped -= len;
1092       while (start_p != p)
1093         {
1094           start_p->discarded = true;
1095           start_p = start_p->next;
1096         }
1097     }
1098 #endif
1099 #if defined(USING_MMAP) && !defined(USING_MADVISE)
1100   page_entry *p, *next;
1101   char *start;
1102   size_t len;
1103 
1104   /* Gather up adjacent pages so they are unmapped together.  */
1105   p = G.free_pages;
1106 
1107   while (p)
1108     {
1109       start = p->page;
1110       next = p->next;
1111       len = p->bytes;
1112       free (p);
1113       p = next;
1114 
1115       while (p && p->page == start + len)
1116 	{
1117 	  next = p->next;
1118 	  len += p->bytes;
1119 	  free (p);
1120 	  p = next;
1121 	}
1122 
1123       munmap (start, len);
1124       G.bytes_mapped -= len;
1125     }
1126 
1127   G.free_pages = NULL;
1128 #endif
1129 #ifdef USING_MALLOC_PAGE_GROUPS
1130   page_entry **pp, *p;
1131   page_group **gp, *g;
1132 
1133   /* Remove all pages from free page groups from the list.  */
1134   pp = &G.free_pages;
1135   while ((p = *pp) != NULL)
1136     if (p->group->in_use == 0)
1137       {
1138 	*pp = p->next;
1139 	free (p);
1140       }
1141     else
1142       pp = &p->next;
1143 
1144   /* Remove all free page groups, and release the storage.  */
1145   gp = &G.page_groups;
1146   while ((g = *gp) != NULL)
1147     if (g->in_use == 0)
1148       {
1149 	*gp = g->next;
1150 	G.bytes_mapped -= g->alloc_size;
1151 	free (g->allocation);
1152       }
1153     else
1154       gp = &g->next;
1155 #endif
1156 }
1157 
1158 /* This table provides a fast way to determine ceil(log_2(size)) for
1159    allocation requests.  The minimum allocation size is eight bytes.  */
1160 #define NUM_SIZE_LOOKUP 512
1161 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1162 {
1163   3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1164   4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1165   5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1166   6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1167   6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1168   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1169   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1170   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1171   7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1172   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1173   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1174   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1175   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1176   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1177   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1178   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1179   8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1180   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1181   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1182   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1183   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1184   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1185   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1186   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1187   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1188   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1189   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1190   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1191   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1192   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1193   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1194   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1195 };
1196 
1197 /* For a given size of memory requested for allocation, return the
1198    actual size that is going to be allocated, as well as the size
1199    order.  */
1200 
1201 static void
1202 ggc_round_alloc_size_1 (size_t requested_size,
1203 			size_t *size_order,
1204 			size_t *alloced_size)
1205 {
1206   size_t order, object_size;
1207 
1208   if (requested_size < NUM_SIZE_LOOKUP)
1209     {
1210       order = size_lookup[requested_size];
1211       object_size = OBJECT_SIZE (order);
1212     }
1213   else
1214     {
1215       order = 10;
1216       while (requested_size > (object_size = OBJECT_SIZE (order)))
1217         order++;
1218     }
1219 
1220   if (size_order)
1221     *size_order = order;
1222   if (alloced_size)
1223     *alloced_size = object_size;
1224 }
1225 
1226 /* For a given size of memory requested for allocation, return the
1227    actual size that is going to be allocated.  */
1228 
1229 size_t
1230 ggc_round_alloc_size (size_t requested_size)
1231 {
1232   size_t size = 0;
1233 
1234   ggc_round_alloc_size_1 (requested_size, NULL, &size);
1235   return size;
1236 }
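
/* Usage sketch (illustration only): with the static size_lookup table
   as written above, ggc_round_alloc_size (20) would return 32 (order 5).
   init_ggc later rewrites size_lookup, so that with MAX_ALIGNMENT == 8,
   for example, the same request instead falls into the 24-byte extra
   order.  A 1000-byte request bypasses the table entirely and scans
   upward from order 10, yielding 1024 bytes.  */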
1237 
1238 /* Push a finalizer onto the appropriate vec.  */
1239 
1240 static void
1241 add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
1242 {
1243   if (f == NULL)
1244     /* No finalizer.  */;
1245   else if (n == 1)
1246     {
1247       finalizer fin (result, f);
1248       G.finalizers[G.context_depth].safe_push (fin);
1249     }
1250   else
1251     {
1252       vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
1253       G.vec_finalizers[G.context_depth].safe_push (fin);
1254     }
1255 }
1256 
1257 /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
1258 
1259 void *
1260 ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
1261 		    MEM_STAT_DECL)
1262 {
1263   size_t order, word, bit, object_offset, object_size;
1264   struct page_entry *entry;
1265   void *result;
1266 
1267   ggc_round_alloc_size_1 (size, &order, &object_size);
1268 
1269   /* If there are non-full pages for this size allocation, they are at
1270      the head of the list.  */
1271   entry = G.pages[order];
1272 
1273   /* If there is no page for this object size, or all pages in this
1274      context are full, allocate a new page.  */
1275   if (entry == NULL || entry->num_free_objects == 0)
1276     {
1277       struct page_entry *new_entry;
1278       new_entry = alloc_page (order);
1279 
1280       new_entry->index_by_depth = G.by_depth_in_use;
1281       push_by_depth (new_entry, 0);
1282 
1283       /* We can skip context depths, if we do, make sure we go all the
1284 	 way to the new depth.  */
1285       while (new_entry->context_depth >= G.depth_in_use)
1286 	push_depth (G.by_depth_in_use-1);
1287 
1288       /* If this is the only entry, it's also the tail.  If it is not
1289 	 the only entry, then we must update the PREV pointer of the
1290 	 ENTRY (G.pages[order]) to point to our new page entry.  */
1291       if (entry == NULL)
1292 	G.page_tails[order] = new_entry;
1293       else
1294 	entry->prev = new_entry;
1295 
1296       /* Put new pages at the head of the page list.  By definition the
1297 	 entry at the head of the list always has a NULL pointer.  */
1298       new_entry->next = entry;
1299       new_entry->prev = NULL;
1300       entry = new_entry;
1301       G.pages[order] = new_entry;
1302 
1303       /* For a new page, we know the word and bit positions (in the
1304 	 in_use bitmap) of the first available object -- they're zero.  */
1305       new_entry->next_bit_hint = 1;
1306       word = 0;
1307       bit = 0;
1308       object_offset = 0;
1309     }
1310   else
1311     {
1312       /* First try to use the hint left from the previous allocation
1313 	 to locate a clear bit in the in-use bitmap.  We've made sure
1314 	 that the one-past-the-end bit is always set, so if the hint
1315 	 has run over, this test will fail.  */
1316       unsigned hint = entry->next_bit_hint;
1317       word = hint / HOST_BITS_PER_LONG;
1318       bit = hint % HOST_BITS_PER_LONG;
1319 
1320       /* If the hint didn't work, scan the bitmap from the beginning.  */
1321       if ((entry->in_use_p[word] >> bit) & 1)
1322 	{
1323 	  word = bit = 0;
1324 	  while (~entry->in_use_p[word] == 0)
1325 	    ++word;
1326 
1327 #if GCC_VERSION >= 3004
1328 	  bit = __builtin_ctzl (~entry->in_use_p[word]);
1329 #else
1330 	  while ((entry->in_use_p[word] >> bit) & 1)
1331 	    ++bit;
1332 #endif
1333 
1334 	  hint = word * HOST_BITS_PER_LONG + bit;
1335 	}
1336 
1337       /* Next time, try the next bit.  */
1338       entry->next_bit_hint = hint + 1;
1339 
1340       object_offset = hint * object_size;
1341     }
1342 
1343   /* Set the in-use bit.  */
1344   entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1345 
1346   /* Keep a running total of the number of free objects.  If this page
1347      fills up, we may have to move it to the end of the list if the
1348      next page isn't full.  If the next page is full, all subsequent
1349      pages are full, so there's no need to move it.  */
1350   if (--entry->num_free_objects == 0
1351       && entry->next != NULL
1352       && entry->next->num_free_objects > 0)
1353     {
1354       /* We have a new head for the list.  */
1355       G.pages[order] = entry->next;
1356 
1357       /* We are moving ENTRY to the end of the page table list.
1358 	 The new page at the head of the list will have NULL in
1359 	 its PREV field and ENTRY will have NULL in its NEXT field.  */
1360       entry->next->prev = NULL;
1361       entry->next = NULL;
1362 
1363       /* Append ENTRY to the tail of the list.  */
1364       entry->prev = G.page_tails[order];
1365       G.page_tails[order]->next = entry;
1366       G.page_tails[order] = entry;
1367     }
1368 
1369   /* Calculate the object's address.  */
1370   result = entry->page + object_offset;
1371   if (GATHER_STATISTICS)
1372     ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1373 			 result FINAL_PASS_MEM_STAT);
1374 
1375 #ifdef ENABLE_GC_CHECKING
1376   /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1377      exact same semantics in presence of memory bugs, regardless of
1378      ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
1379      handle to avoid handle leak.  */
1380   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1381 
1382   /* `Poison' the entire allocated object, including any padding at
1383      the end.  */
1384   memset (result, 0xaf, object_size);
1385 
1386   /* Make the bytes after the end of the object inaccessible.  Discard the
1387      handle to avoid handle leak.  */
1388   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1389 						object_size - size));
1390 #endif
1391 
1392   /* Tell Valgrind that the memory is there, but its content isn't
1393      defined.  The bytes at the end of the object are still marked
1394      inaccessible.  */
1395   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1396 
1397   /* Keep track of how many bytes are being allocated.  This
1398      information is used in deciding when to collect.  */
1399   G.allocated += object_size;
1400 
1401   /* For timevar statistics.  */
1402   timevar_ggc_mem_total += object_size;
1403 
1404   if (f)
1405     add_finalizer (result, f, s, n);
1406 
1407   if (GATHER_STATISTICS)
1408     {
1409       size_t overhead = object_size - size;
1410 
1411       G.stats.total_overhead += overhead;
1412       G.stats.total_allocated += object_size;
1413       G.stats.total_overhead_per_order[order] += overhead;
1414       G.stats.total_allocated_per_order[order] += object_size;
1415 
1416       if (size <= 32)
1417 	{
1418 	  G.stats.total_overhead_under32 += overhead;
1419 	  G.stats.total_allocated_under32 += object_size;
1420 	}
1421       if (size <= 64)
1422 	{
1423 	  G.stats.total_overhead_under64 += overhead;
1424 	  G.stats.total_allocated_under64 += object_size;
1425 	}
1426       if (size <= 128)
1427 	{
1428 	  G.stats.total_overhead_under128 += overhead;
1429 	  G.stats.total_allocated_under128 += object_size;
1430 	}
1431     }
1432 
1433   if (GGC_DEBUG_LEVEL >= 3)
1434     fprintf (G.debug_file,
1435 	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1436 	     (unsigned long) size, (unsigned long) object_size, result,
1437 	     (void *) entry);
1438 
1439   return result;
1440 }
1441 
1442 /* Mark function for strings.  */
1443 
1444 void
1445 gt_ggc_m_S (const void *p)
1446 {
1447   page_entry *entry;
1448   unsigned bit, word;
1449   unsigned long mask;
1450   unsigned long offset;
1451 
1452   if (!p || !ggc_allocated_p (p))
1453     return;
1454 
1455   /* Look up the page on which the object is allocated.  */
1456   entry = lookup_page_table_entry (p);
1457   gcc_assert (entry);
1458 
1459   /* Calculate the index of the object on the page; this is its bit
1460      position in the in_use_p bitmap.  Note that because a char* might
1461      point to the middle of an object, we need special code here to
1462      make sure P points to the start of an object.  */
1463   offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1464   if (offset)
1465     {
1466       /* Here we've seen a char* which does not point to the beginning
1467 	 of an allocated object.  We assume it points to the middle of
1468 	 a STRING_CST.  */
1469       gcc_assert (offset == offsetof (struct tree_string, str));
1470       p = ((const char *) p) - offset;
1471       gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1472       return;
1473     }
1474 
1475   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1476   word = bit / HOST_BITS_PER_LONG;
1477   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1478 
1479   /* If the bit was previously set, skip it.  */
1480   if (entry->in_use_p[word] & mask)
1481     return;
1482 
1483   /* Otherwise set it, and decrement the free object count.  */
1484   entry->in_use_p[word] |= mask;
1485   entry->num_free_objects -= 1;
1486 
1487   if (GGC_DEBUG_LEVEL >= 4)
1488     fprintf (G.debug_file, "Marking %p\n", p);
1489 
1490   return;
1491 }
1492 
1493 
1494 /* User-callable entry points for marking string X.  */
1495 
1496 void
1497 gt_ggc_mx (const char *& x)
1498 {
1499   gt_ggc_m_S (x);
1500 }
1501 
1502 void
1503 gt_ggc_mx (unsigned char *& x)
1504 {
1505   gt_ggc_m_S (x);
1506 }
1507 
1508 void
1509 gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1510 {
1511 }
1512 
1513 /* If P is not marked, mark it and return false.  Otherwise return true.
1514    P must have been allocated by the GC allocator; it mustn't point to
1515    static objects, stack variables, or memory allocated with malloc.  */
1516 
1517 int
1518 ggc_set_mark (const void *p)
1519 {
1520   page_entry *entry;
1521   unsigned bit, word;
1522   unsigned long mask;
1523 
1524   /* Look up the page on which the object is alloced.  If the object
1525      wasn't allocated by the collector, we'll probably die.  */
1526   entry = lookup_page_table_entry (p);
1527   gcc_assert (entry);
1528 
1529   /* Calculate the index of the object on the page; this is its bit
1530      position in the in_use_p bitmap.  */
1531   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1532   word = bit / HOST_BITS_PER_LONG;
1533   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1534 
1535   /* If the bit was previously set, skip it.  */
1536   if (entry->in_use_p[word] & mask)
1537     return 1;
1538 
1539   /* Otherwise set it, and decrement the free object count.  */
1540   entry->in_use_p[word] |= mask;
1541   entry->num_free_objects -= 1;
1542 
1543   if (GGC_DEBUG_LEVEL >= 4)
1544     fprintf (G.debug_file, "Marking %p\n", p);
1545 
1546   return 0;
1547 }
1548 
1549 /* Return 1 if P has been marked, zero otherwise.
1550    P must have been allocated by the GC allocator; it mustn't point to
1551    static objects, stack variables, or memory allocated with malloc.  */
1552 
1553 int
1554 ggc_marked_p (const void *p)
1555 {
1556   page_entry *entry;
1557   unsigned bit, word;
1558   unsigned long mask;
1559 
1560   /* Look up the page on which the object is alloced.  If the object
1561      wasn't allocated by the collector, we'll probably die.  */
1562   entry = lookup_page_table_entry (p);
1563   gcc_assert (entry);
1564 
1565   /* Calculate the index of the object on the page; this is its bit
1566      position in the in_use_p bitmap.  */
1567   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1568   word = bit / HOST_BITS_PER_LONG;
1569   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1570 
1571   return (entry->in_use_p[word] & mask) != 0;
1572 }
1573 
1574 /* Return the size of the gc-able object P.  */
1575 
1576 size_t
1577 ggc_get_size (const void *p)
1578 {
1579   page_entry *pe = lookup_page_table_entry (p);
1580   return OBJECT_SIZE (pe->order);
1581 }
1582 
1583 /* Release the memory for object P.  */
1584 
1585 void
1586 ggc_free (void *p)
1587 {
1588   if (in_gc)
1589     return;
1590 
1591   page_entry *pe = lookup_page_table_entry (p);
1592   size_t order = pe->order;
1593   size_t size = OBJECT_SIZE (order);
1594 
1595   if (GATHER_STATISTICS)
1596     ggc_free_overhead (p);
1597 
1598   if (GGC_DEBUG_LEVEL >= 3)
1599     fprintf (G.debug_file,
1600 	     "Freeing object, actual size=%lu, at %p on %p\n",
1601 	     (unsigned long) size, p, (void *) pe);
1602 
1603 #ifdef ENABLE_GC_CHECKING
1604   /* Poison the data, to indicate the data is garbage.  */
1605   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1606   memset (p, 0xa5, size);
1607 #endif
1608   /* Let valgrind know the object is free.  */
1609   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1610 
1611 #ifdef ENABLE_GC_ALWAYS_COLLECT
1612   /* In the completely-anal-checking mode, we do *not* immediately free
1613      the data, but instead verify that the data is *actually* not
1614      reachable the next time we collect.  */
1615   {
1616     struct free_object *fo = XNEW (struct free_object);
1617     fo->object = p;
1618     fo->next = G.free_object_list;
1619     G.free_object_list = fo;
1620   }
1621 #else
1622   {
1623     unsigned int bit_offset, word, bit;
1624 
1625     G.allocated -= size;
1626 
1627     /* Mark the object not-in-use.  */
1628     bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1629     word = bit_offset / HOST_BITS_PER_LONG;
1630     bit = bit_offset % HOST_BITS_PER_LONG;
1631     pe->in_use_p[word] &= ~(1UL << bit);
1632 
1633     if (pe->num_free_objects++ == 0)
1634       {
1635 	page_entry *p, *q;
1636 
1637 	/* If the page is completely full, then it's supposed to
1638 	   be after all pages that aren't.  Since we've freed one
1639 	   object from a page that was full, we need to move the
1640 	   page to the head of the list.
1641 
1642 	   PE is the node we want to move.  Q is the previous node
1643 	   and P is the next node in the list.  */
1644 	q = pe->prev;
1645 	if (q && q->num_free_objects == 0)
1646 	  {
1647 	    p = pe->next;
1648 
1649 	    q->next = p;
1650 
1651 	    /* If PE was at the end of the list, then Q becomes the
1652 	       new end of the list.  If PE was not the end of the
1653 	       list, then we need to update the PREV field for P.  */
1654 	    if (!p)
1655 	      G.page_tails[order] = q;
1656 	    else
1657 	      p->prev = q;
1658 
1659 	    /* Move PE to the head of the list.  */
1660 	    pe->next = G.pages[order];
1661 	    pe->prev = NULL;
1662 	    G.pages[order]->prev = pe;
1663 	    G.pages[order] = pe;
1664 	  }
1665 
1666 	/* Reset the hint bit to point to the only free object.  */
1667 	pe->next_bit_hint = bit_offset;
1668       }
1669   }
1670 #endif
1671 }
1672 
1673 /* Subroutine of init_ggc which computes the pair of numbers used to
1674    perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1675 
1676    This algorithm is taken from Granlund and Montgomery's paper
1677    "Division by Invariant Integers using Multiplication"
1678    (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1679    constants).  */
1680 
1681 static void
1682 compute_inverse (unsigned order)
1683 {
1684   size_t size, inv;
1685   unsigned int e;
1686 
1687   size = OBJECT_SIZE (order);
1688   e = 0;
1689   while (size % 2 == 0)
1690     {
1691       e++;
1692       size >>= 1;
1693     }
1694 
1695   inv = size;
1696   while (inv * size != 1)
1697     inv = inv * (2 - inv*size);
1698 
1699   DIV_MULT (order) = inv;
1700   DIV_SHIFT (order) = e;
1701 }
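/* A minimal sketch of how the DIV_MULT/DIV_SHIFT pair computed above
   divides exactly, assuming a 64-bit size_t and an object size of
   24 bytes (24 = 3 << 3, so the shift is 3 and the multiplier is the
   inverse of 3 modulo 2^64).  The helper and the constant below are
   illustrative only and are not part of the allocator.  */
#if 0
static size_t
example_exact_div_by_24 (size_t offset)
{
  /* 3 * 0xAAAAAAAAAAAAAAAB == 2^65 + 1 == 1 (mod 2^64), so the multiply
     undoes the factor of 3.  Only valid when OFFSET is a multiple of
     24; e.g. 240 yields 10.  */
  return (offset >> 3) * 0xAAAAAAAAAAAAAAABULL;
}
#endif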
1702 
1703 /* Initialize the ggc-mmap allocator.  */
1704 void
1705 init_ggc (void)
1706 {
1707   static bool init_p = false;
1708   unsigned order;
1709 
1710   if (init_p)
1711     return;
1712   init_p = true;
1713 
1714   G.pagesize = getpagesize ();
1715   G.lg_pagesize = exact_log2 (G.pagesize);
1716 
1717 #ifdef HAVE_MMAP_DEV_ZERO
1718   G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1719   if (G.dev_zero_fd == -1)
1720     internal_error ("open /dev/zero: %m");
1721 #endif
1722 
1723 #if 0
1724   G.debug_file = fopen ("ggc-mmap.debug", "w");
1725 #else
1726   G.debug_file = stdout;
1727 #endif
1728 
1729 #ifdef USING_MMAP
1730   /* StunOS has an amazing off-by-one error for the first mmap allocation
1731      after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1732      believe, is an unaligned page allocation, which would cause us to
1733      hork badly if we tried to use it.  */
1734   {
1735     char *p = alloc_anon (NULL, G.pagesize, true);
1736     struct page_entry *e;
1737     if ((uintptr_t)p & (G.pagesize - 1))
1738       {
1739 	/* How losing.  Discard this one and try another.  If we still
1740 	   can't get something useful, give up.  */
1741 
1742 	p = alloc_anon (NULL, G.pagesize, true);
1743 	gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1744       }
1745 
1746     /* We have a good page, might as well hold onto it...  */
1747     e = XCNEW (struct page_entry);
1748     e->bytes = G.pagesize;
1749     e->page = p;
1750     e->next = G.free_pages;
1751     G.free_pages = e;
1752   }
1753 #endif
1754 
1755   /* Initialize the object size table.  */
1756   for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1757     object_size_table[order] = (size_t) 1 << order;
1758   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1759     {
1760       size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1761 
1762       /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1763 	 so that we're sure of getting aligned memory.  */
1764       s = ROUND_UP (s, MAX_ALIGNMENT);
1765       object_size_table[order] = s;
1766     }
1767 
1768   /* Initialize the objects-per-page and inverse tables.  */
1769   for (order = 0; order < NUM_ORDERS; ++order)
1770     {
1771       objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1772       if (objects_per_page_table[order] == 0)
1773 	objects_per_page_table[order] = 1;
1774       compute_inverse (order);
1775     }
1776 
1777   /* Reset the size_lookup array to put appropriately sized objects in
1778      the special orders.  All objects bigger than the previous power
1779      of two, but no greater than the special size, should go in the
1780      new order.  */
1781   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1782     {
1783       int o;
1784       int i;
1785 
1786       i = OBJECT_SIZE (order);
1787       if (i >= NUM_SIZE_LOOKUP)
1788 	continue;
1789 
1790       for (o = size_lookup[i]; o == size_lookup [i]; --i)
1791 	size_lookup[i] = order;
1792     }
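  /* For illustration (hypothetical sizes): if one of the special orders
     ended up with OBJECT_SIZE 76, the loop above would redirect
     size_lookup[65] through size_lookup[76] from the 128-byte order to
     it, so a 72-byte request would waste 4 bytes instead of 56.  */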
1793 
1794   G.depth_in_use = 0;
1795   G.depth_max = 10;
1796   G.depth = XNEWVEC (unsigned int, G.depth_max);
1797 
1798   G.by_depth_in_use = 0;
1799   G.by_depth_max = INITIAL_PTE_COUNT;
1800   G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1801   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1802 
1803   /* Allocate space for the depth 0 finalizers.  */
1804   G.finalizers.safe_push (vNULL);
1805   G.vec_finalizers.safe_push (vNULL);
1806   gcc_assert (G.finalizers.length() == 1);
1807 }
1808 
1809 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1810    reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
1811 
1812 static void
1813 ggc_recalculate_in_use_p (page_entry *p)
1814 {
1815   unsigned int i;
1816   size_t num_objects;
1817 
1818   /* Because the past-the-end bit in in_use_p is always set, we
1819      pretend there is one additional object.  */
1820   num_objects = OBJECTS_IN_PAGE (p) + 1;
1821 
1822   /* Reset the free object count.  */
1823   p->num_free_objects = num_objects;
1824 
1825   /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
1826   for (i = 0;
1827        i < CEIL (BITMAP_SIZE (num_objects),
1828 		 sizeof (*p->in_use_p));
1829        ++i)
1830     {
1831       unsigned long j;
1832 
1833       /* Something is in use if it is marked, or if it was in use in a
1834 	 context further down the context stack.  */
1835       p->in_use_p[i] |= save_in_use_p (p)[i];
1836 
1837       /* Decrement the free object count for every object allocated.  */
1838       for (j = p->in_use_p[i]; j; j >>= 1)
1839 	p->num_free_objects -= (j & 1);
1840     }
1841 
1842   gcc_assert (p->num_free_objects < num_objects);
1843 }
1844 
1845 /* Unmark all objects.  */
1846 
1847 static void
1848 clear_marks (void)
1849 {
1850   unsigned order;
1851 
1852   for (order = 2; order < NUM_ORDERS; order++)
1853     {
1854       page_entry *p;
1855 
1856       for (p = G.pages[order]; p != NULL; p = p->next)
1857 	{
1858 	  size_t num_objects = OBJECTS_IN_PAGE (p);
1859 	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1860 
1861 	  /* The data should be page-aligned.  */
1862 	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1863 
1864 	  /* Pages that aren't in the topmost context are not collected;
1865 	     nevertheless, we need their in-use bit vectors to store GC
1866 	     marks.  So, back them up first.  */
1867 	  if (p->context_depth < G.context_depth)
1868 	    {
1869 	      if (! save_in_use_p (p))
1870 		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1871 	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1872 	    }
1873 
1874 	  /* Reset the number of free objects and clear the
1875 	     in-use bits.  These will be adjusted by mark_obj.  */
1876 	  p->num_free_objects = num_objects;
1877 	  memset (p->in_use_p, 0, bitmap_size);
1878 
1879 	  /* Make sure the one-past-the-end bit is always set.  */
1880 	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1881 	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
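	  /* E.g. with 64-bit longs, a page holding 64 objects gets its
	     sentinel at in_use_p[1], bit 0, and a page holding 100
	     objects at in_use_p[1], bit 36; ggc_recalculate_in_use_p
	     relies on this extra "object" always appearing in use.  */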
1882 	}
1883     }
1884 }
1885 
1886 /* Check if any blocks with a registered finalizer have become unmarked.  If so,
1887    run the finalizer and unregister it because the block is about to be freed.
1888    Note that no guarantee is made about the order in which finalizers run, so
1889    touching other objects in GC memory is extremely unwise.  */
1890 
1891 static void
1892 ggc_handle_finalizers ()
1893 {
1894   unsigned dlen = G.finalizers.length();
1895   for (unsigned d = G.context_depth; d < dlen; ++d)
1896     {
1897       vec<finalizer> &v = G.finalizers[d];
1898       unsigned length = v.length ();
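      /* Note that unordered_remove backfills slot I with the vector's
	 last element, so I is only advanced when nothing is removed.  */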
1899       for (unsigned int i = 0; i < length;)
1900 	{
1901 	  finalizer &f = v[i];
1902 	  if (!ggc_marked_p (f.addr ()))
1903 	    {
1904 	      f.call ();
1905 	      v.unordered_remove (i);
1906 	      length--;
1907 	    }
1908 	  else
1909 	    i++;
1910 	}
1911     }
1912 
1913   gcc_assert (dlen == G.vec_finalizers.length());
1914   for (unsigned d = G.context_depth; d < dlen; ++d)
1915     {
1916       vec<vec_finalizer> &vv = G.vec_finalizers[d];
1917       unsigned length = vv.length ();
1918       for (unsigned int i = 0; i < length;)
1919 	{
1920 	  vec_finalizer &f = vv[i];
1921 	  if (!ggc_marked_p (f.addr ()))
1922 	    {
1923 	      f.call ();
1924 	      vv.unordered_remove (i);
1925 	      length--;
1926 	    }
1927 	  else
1928 	    i++;
1929 	}
1930     }
1931 }
1932 
1933 /* Free all empty pages.  Partially empty pages need no attention
1934    because the `mark' bit doubles as an `unused' bit.  */
1935 
1936 static void
1937 sweep_pages (void)
1938 {
1939   unsigned order;
1940 
1941   for (order = 2; order < NUM_ORDERS; order++)
1942     {
1943       /* The last page-entry to consider, regardless of entries
1944 	 placed at the end of the list.  */
1945       page_entry * const last = G.page_tails[order];
1946 
1947       size_t num_objects;
1948       size_t live_objects;
1949       page_entry *p, *previous;
1950       int done;
1951 
1952       p = G.pages[order];
1953       if (p == NULL)
1954 	continue;
1955 
1956       previous = NULL;
1957       do
1958 	{
1959 	  page_entry *next = p->next;
1960 
1961 	  /* Loop until all entries have been examined.  */
1962 	  done = (p == last);
1963 
1964 	  num_objects = OBJECTS_IN_PAGE (p);
1965 
1966 	  /* Add all live objects on this page to the count of
1967              allocated memory.  */
1968 	  live_objects = num_objects - p->num_free_objects;
1969 
1970 	  G.allocated += OBJECT_SIZE (order) * live_objects;
1971 
1972 	  /* Only objects on pages in the topmost context should get
1973 	     collected.  */
1974 	  if (p->context_depth < G.context_depth)
1975 	    ;
1976 
1977 	  /* Remove the page if it's empty.  */
1978 	  else if (live_objects == 0)
1979 	    {
1980 	      /* If P was the first page in the list, then NEXT
1981 		 becomes the new first page in the list, otherwise
1982 		 splice P out of the forward pointers.  */
1983 	      if (! previous)
1984 		G.pages[order] = next;
1985 	      else
1986 		previous->next = next;
1987 
1988 	      /* Splice P out of the back pointers too.  */
1989 	      if (next)
1990 		next->prev = previous;
1991 
1992 	      /* Are we removing the last element?  */
1993 	      if (p == G.page_tails[order])
1994 		G.page_tails[order] = previous;
1995 	      free_page (p);
1996 	      p = previous;
1997 	    }
1998 
1999 	  /* If the page is full, move it to the end.  */
2000 	  else if (p->num_free_objects == 0)
2001 	    {
2002 	      /* Don't move it if it's already at the end.  */
2003 	      if (p != G.page_tails[order])
2004 		{
2005 		  /* Move p to the end of the list.  */
2006 		  p->next = NULL;
2007 		  p->prev = G.page_tails[order];
2008 		  G.page_tails[order]->next = p;
2009 
2010 		  /* Update the tail pointer...  */
2011 		  G.page_tails[order] = p;
2012 
2013 		  /* ... and the head pointer, if necessary.  */
2014 		  if (! previous)
2015 		    G.pages[order] = next;
2016 		  else
2017 		    previous->next = next;
2018 
2019 		  /* And update the backpointer in NEXT if necessary.  */
2020 		  if (next)
2021 		    next->prev = previous;
2022 
2023 		  p = previous;
2024 		}
2025 	    }
2026 
2027 	  /* If we've fallen through to here, it's a page in the
2028 	     topmost context that is neither full nor empty.  Such a
2029 	     page must precede pages at lesser context depth in the
2030 	     list, so move it to the head.  */
2031 	  else if (p != G.pages[order])
2032 	    {
2033 	      previous->next = p->next;
2034 
2035 	      /* Update the backchain in the next node if it exists.  */
2036 	      if (p->next)
2037 		p->next->prev = previous;
2038 
2039 	      /* Move P to the head of the list.  */
2040 	      p->next = G.pages[order];
2041 	      p->prev = NULL;
2042 	      G.pages[order]->prev = p;
2043 
2044 	      /* Update the head pointer.  */
2045 	      G.pages[order] = p;
2046 
2047 	      /* Are we moving the last element?  */
2048 	      if (G.page_tails[order] == p)
2049 	        G.page_tails[order] = previous;
2050 	      p = previous;
2051 	    }
2052 
2053 	  previous = p;
2054 	  p = next;
2055 	}
2056       while (! done);
2057 
2058       /* Now, restore the in_use_p vectors for any pages from contexts
2059          other than the current one.  */
2060       for (p = G.pages[order]; p; p = p->next)
2061 	if (p->context_depth != G.context_depth)
2062 	  ggc_recalculate_in_use_p (p);
2063     }
2064 }
2065 
2066 #ifdef ENABLE_GC_CHECKING
2067 /* Clobber all free objects.  */
2068 
2069 static void
2070 poison_pages (void)
2071 {
2072   unsigned order;
2073 
2074   for (order = 2; order < NUM_ORDERS; order++)
2075     {
2076       size_t size = OBJECT_SIZE (order);
2077       page_entry *p;
2078 
2079       for (p = G.pages[order]; p != NULL; p = p->next)
2080 	{
2081 	  size_t num_objects;
2082 	  size_t i;
2083 
2084 	  if (p->context_depth != G.context_depth)
2085 	    /* Since we don't do any collection for pages in pushed
2086 	       contexts, there's no need to do any poisoning.  And
2087 	       besides, the IN_USE_P array isn't valid until we pop
2088 	       contexts.  */
2089 	    continue;
2090 
2091 	  num_objects = OBJECTS_IN_PAGE (p);
2092 	  for (i = 0; i < num_objects; i++)
2093 	    {
2094 	      size_t word, bit;
2095 	      word = i / HOST_BITS_PER_LONG;
2096 	      bit = i % HOST_BITS_PER_LONG;
2097 	      if (((p->in_use_p[word] >> bit) & 1) == 0)
2098 		{
2099 		  char *object = p->page + i * size;
2100 
2101 		  /* Keep poison-by-write when we expect to use Valgrind,
2102 		     so the exact same memory semantics are kept, in case
2103 		     there are memory errors.  We override this request
2104 		     below.  */
2105 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2106 								 size));
2107 		  memset (object, 0xa5, size);
2108 
2109 		  /* Drop the handle to avoid handle leak.  */
2110 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2111 		}
2112 	    }
2113 	}
2114     }
2115 }
2116 #else
2117 #define poison_pages()
2118 #endif
2119 
2120 #ifdef ENABLE_GC_ALWAYS_COLLECT
2121 /* Validate that the reportedly free objects actually are.  */
2122 
2123 static void
2124 validate_free_objects (void)
2125 {
2126   struct free_object *f, *next, *still_free = NULL;
2127 
2128   for (f = G.free_object_list; f ; f = next)
2129     {
2130       page_entry *pe = lookup_page_table_entry (f->object);
2131       size_t bit, word;
2132 
2133       bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2134       word = bit / HOST_BITS_PER_LONG;
2135       bit = bit % HOST_BITS_PER_LONG;
2136       next = f->next;
2137 
2138       /* Make certain it isn't visible from any root.  Notice that we
2139 	 do this check before sweep_pages merges save_in_use_p.  */
2140       gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2141 
2142       /* If the object comes from an outer context, then retain the
2143 	 free_object entry, so that we can verify that the address
2144 	 isn't live on the stack in some outer context.  */
2145       if (pe->context_depth != G.context_depth)
2146 	{
2147 	  f->next = still_free;
2148 	  still_free = f;
2149 	}
2150       else
2151 	free (f);
2152     }
2153 
2154   G.free_object_list = still_free;
2155 }
2156 #else
2157 #define validate_free_objects()
2158 #endif
2159 
2160 /* Top level mark-and-sweep routine.  */
2161 
2162 void
2163 ggc_collect (void)
2164 {
2165   /* Avoid frequent unnecessary work by skipping collection if the
2166      total allocations haven't expanded much since the last
2167      collection.  */
2168   float allocated_last_gc =
2169     MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2170 
2171   float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2172   if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2173     return;
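  /* For illustration only (the parameter defaults vary by host): with
     ggc-min-heapsize at 4096 kB and ggc-min-expand at 30, a compiler
     holding 5 MB after the previous collection will not collect again
     until G.allocated reaches 5 MB + 1.5 MB.  */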
2174 
2175   timevar_push (TV_GC);
2176   if (!quiet_flag)
2177     fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
2178   if (GGC_DEBUG_LEVEL >= 2)
2179     fprintf (G.debug_file, "BEGIN COLLECTING\n");
2180 
2181   /* Zero the total allocated bytes.  This will be recalculated in the
2182      sweep phase.  */
2183   G.allocated = 0;
2184 
2185   /* Release the pages we freed the last time we collected, but didn't
2186      reuse in the interim.  */
2187   release_pages ();
2188 
2189   /* Indicate that we've seen collections at this context depth.  */
2190   G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2191 
2192   invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2193 
2194   in_gc = true;
2195   clear_marks ();
2196   ggc_mark_roots ();
2197   ggc_handle_finalizers ();
2198 
2199   if (GATHER_STATISTICS)
2200     ggc_prune_overhead_list ();
2201 
2202   poison_pages ();
2203   validate_free_objects ();
2204   sweep_pages ();
2205 
2206   in_gc = false;
2207   G.allocated_last_gc = G.allocated;
2208 
2209   invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2210 
2211   timevar_pop (TV_GC);
2212 
2213   if (!quiet_flag)
2214     fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2215   if (GGC_DEBUG_LEVEL >= 2)
2216     fprintf (G.debug_file, "END COLLECTING\n");
2217 }
2218 
2219 /* Assume that all GGC memory is reachable and grow the limits for the next
2220    collection.  With checking enabled, trigger a collection so that a -Q
2221    compilation reports how much memory really is reachable.  */
2222 
2223 void
2224 ggc_grow (void)
2225 {
2226   if (!flag_checking)
2227     G.allocated_last_gc = MAX (G.allocated_last_gc,
2228 			       G.allocated);
2229   else
2230     ggc_collect ();
2231   if (!quiet_flag)
2232     fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
2233 }
2234 
2235 /* Print allocation statistics.  */
2236 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2237 		  ? (x) \
2238 		  : ((x) < 1024*1024*10 \
2239 		     ? (x) / 1024 \
2240 		     : (x) / (1024*1024))))
2241 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
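/* For example, SCALE/STAT_LABEL render 5000 as "5000 ", 2000000 as
   "1953k" and 300000000 as "286M".  */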
2242 
2243 void
2244 ggc_print_statistics (void)
2245 {
2246   struct ggc_statistics stats;
2247   unsigned int i;
2248   size_t total_overhead = 0;
2249 
2250   /* Clear the statistics.  */
2251   memset (&stats, 0, sizeof (stats));
2252 
2253   /* Make sure collection will really occur.  */
2254   G.allocated_last_gc = 0;
2255 
2256   /* Collect and print the statistics common across collectors.  */
2257   ggc_print_common_statistics (stderr, &stats);
2258 
2259   /* Release free pages so that we will not count the bytes allocated
2260      there as part of the total allocated memory.  */
2261   release_pages ();
2262 
2263   /* Collect some information about the various sizes of
2264      allocation.  */
2265   fprintf (stderr,
2266            "Memory still allocated at the end of the compilation process\n");
2267   fprintf (stderr, "%-8s %10s  %10s  %10s\n",
2268 	   "Size", "Allocated", "Used", "Overhead");
2269   for (i = 0; i < NUM_ORDERS; ++i)
2270     {
2271       page_entry *p;
2272       size_t allocated;
2273       size_t in_use;
2274       size_t overhead;
2275 
2276       /* Skip empty entries.  */
2277       if (!G.pages[i])
2278 	continue;
2279 
2280       overhead = allocated = in_use = 0;
2281 
2282       /* Figure out the total number of bytes allocated for objects of
2283 	 this size, and how many of them are actually in use.  Also figure
2284 	 out how much memory the page table is using.  */
2285       for (p = G.pages[i]; p; p = p->next)
2286 	{
2287 	  allocated += p->bytes;
2288 	  in_use +=
2289 	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2290 
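	  /* The in_use_p bitmap is a one-long trailing array inside
	     page_entry, so count sizeof (page_entry) minus that long
	     plus the bitmap's real size -- the same formula used when
	     the entries are allocated, e.g. in ggc_pch_read below.  */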
2291 	  overhead += (sizeof (page_entry) - sizeof (long)
2292 		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2293 	}
2294       fprintf (stderr, "%-8lu %10lu%c %10lu%c %10lu%c\n",
2295 	       (unsigned long) OBJECT_SIZE (i),
2296 	       SCALE (allocated), STAT_LABEL (allocated),
2297 	       SCALE (in_use), STAT_LABEL (in_use),
2298 	       SCALE (overhead), STAT_LABEL (overhead));
2299       total_overhead += overhead;
2300     }
2301   fprintf (stderr, "%-8s %10lu%c %10lu%c %10lu%c\n", "Total",
2302 	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2303 	   SCALE (G.allocated), STAT_LABEL (G.allocated),
2304 	   SCALE (total_overhead), STAT_LABEL (total_overhead));
2305 
2306   if (GATHER_STATISTICS)
2307     {
2308       fprintf (stderr, "\nTotal allocations and overheads during "
2309 	       "the compilation process\n");
2310 
2311       fprintf (stderr, "Total Overhead:                          %10"
2312 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead);
2313       fprintf (stderr, "Total Allocated:                         %10"
2314 	       HOST_LONG_LONG_FORMAT "d\n",
2315 	       G.stats.total_allocated);
2316 
2317       fprintf (stderr, "Total Overhead  under  32B:              %10"
2318 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under32);
2319       fprintf (stderr, "Total Allocated under  32B:              %10"
2320 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under32);
2321       fprintf (stderr, "Total Overhead  under  64B:              %10"
2322 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under64);
2323       fprintf (stderr, "Total Allocated under  64B:              %10"
2324 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under64);
2325       fprintf (stderr, "Total Overhead  under 128B:              %10"
2326 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under128);
2327       fprintf (stderr, "Total Allocated under 128B:              %10"
2328 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under128);
2329 
2330       for (i = 0; i < NUM_ORDERS; i++)
2331 	if (G.stats.total_allocated_per_order[i])
2332 	  {
2333 	    fprintf (stderr, "Total Overhead  page size %9lu:     %10"
2334 		     HOST_LONG_LONG_FORMAT "d\n",
2335 		     (unsigned long) OBJECT_SIZE (i),
2336 		     G.stats.total_overhead_per_order[i]);
2337 	    fprintf (stderr, "Total Allocated page size %9lu:     %10"
2338 		     HOST_LONG_LONG_FORMAT "d\n",
2339 		     (unsigned long) OBJECT_SIZE (i),
2340 		     G.stats.total_allocated_per_order[i]);
2341 	  }
2342   }
2343 }
2344 
2345 struct ggc_pch_ondisk
2346 {
2347   unsigned totals[NUM_ORDERS];
2348 };
2349 
2350 struct ggc_pch_data
2351 {
2352   struct ggc_pch_ondisk d;
2353   uintptr_t base[NUM_ORDERS];
2354   size_t written[NUM_ORDERS];
2355 };
2356 
2357 struct ggc_pch_data *
2358 init_ggc_pch (void)
2359 {
2360   return XCNEW (struct ggc_pch_data);
2361 }
2362 
2363 void
2364 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2365 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2366 {
2367   unsigned order;
2368 
2369   if (size < NUM_SIZE_LOOKUP)
2370     order = size_lookup[size];
2371   else
2372     {
2373       order = 10;
2374       while (size > OBJECT_SIZE (order))
2375 	order++;
2376     }
2377 
2378   d->d.totals[order]++;
2379 }
2380 
2381 size_t
2382 ggc_pch_total_size (struct ggc_pch_data *d)
2383 {
2384   size_t a = 0;
2385   unsigned i;
2386 
2387   for (i = 0; i < NUM_ORDERS; i++)
2388     a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2389   return a;
2390 }
2391 
2392 void
2393 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2394 {
2395   uintptr_t a = (uintptr_t) base;
2396   unsigned i;
2397 
2398   for (i = 0; i < NUM_ORDERS; i++)
2399     {
2400       d->base[i] = a;
2401       a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2402     }
2403 }
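/* For illustration (hypothetical totals): with 4 kB pages, 100 objects
   of a 24-byte order and 10 objects of a 4096-byte order reserve
   PAGE_ALIGN (2400) == 4096 and 40960 bytes respectively, so the
   per-order base addresses above are laid out back to back, with
   empty orders contributing nothing.  */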
2404 
2405 
2406 char *
2407 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2408 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2409 {
2410   unsigned order;
2411   char *result;
2412 
2413   if (size < NUM_SIZE_LOOKUP)
2414     order = size_lookup[size];
2415   else
2416     {
2417       order = 10;
2418       while (size > OBJECT_SIZE (order))
2419 	order++;
2420     }
2421 
2422   result = (char *) d->base[order];
2423   d->base[order] += OBJECT_SIZE (order);
2424   return result;
2425 }
2426 
2427 void
2428 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2429 		       FILE *f ATTRIBUTE_UNUSED)
2430 {
2431   /* Nothing to do.  */
2432 }
2433 
2434 void
2435 ggc_pch_write_object (struct ggc_pch_data *d,
2436 		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2437 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2438 {
2439   unsigned order;
2440   static const char emptyBytes[256] = { 0 };
2441 
2442   if (size < NUM_SIZE_LOOKUP)
2443     order = size_lookup[size];
2444   else
2445     {
2446       order = 10;
2447       while (size > OBJECT_SIZE (order))
2448 	order++;
2449     }
2450 
2451   if (fwrite (x, size, 1, f) != 1)
2452     fatal_error (input_location, "can%'t write PCH file: %m");
2453 
2454   /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2455      object out to OBJECT_SIZE(order).  This happens for strings.  */
2456 
2457   if (size != OBJECT_SIZE (order))
2458     {
2459       unsigned padding = OBJECT_SIZE (order) - size;
2460 
2461       /* To speed small writes, we use a nulled-out array that's larger
2462          than most padding requests as the source for our null bytes.  This
2463          permits us to do the padding with fwrite() rather than fseek(), and
2464          limits the chance the OS may try to flush any outstanding writes.  */
2465       if (padding <= sizeof (emptyBytes))
2466         {
2467           if (fwrite (emptyBytes, 1, padding, f) != padding)
2468             fatal_error (input_location, "can%'t write PCH file");
2469         }
2470       else
2471         {
2472           /* Larger than our buffer?  Just default to fseek.  */
2473           if (fseek (f, padding, SEEK_CUR) != 0)
2474             fatal_error (input_location, "can%'t write PCH file");
2475         }
2476     }
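  /* For instance, a 37-byte string that maps to the 64-byte order gets
     27 zero bytes appended from emptyBytes; only pads larger than
     sizeof (emptyBytes) take the fseek path.  */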
2477 
2478   d->written[order]++;
2479   if (d->written[order] == d->d.totals[order]
2480       && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2481 				   G.pagesize),
2482 		SEEK_CUR) != 0)
2483     fatal_error (input_location, "can%'t write PCH file: %m");
2484 }
2485 
2486 void
2487 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2488 {
2489   if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2490     fatal_error (input_location, "can%'t write PCH file: %m");
2491   free (d);
2492 }
2493 
2494 /* Move the PCH PTE entries just added to the end of by_depth to the
2495    front.  */
2496 
2497 static void
2498 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2499 {
2500   unsigned i;
2501 
2502   /* First, we swap the new entries to the front of the varrays.  */
2503   page_entry **new_by_depth;
2504   unsigned long **new_save_in_use;
2505 
2506   new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2507   new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2508 
2509   memcpy (&new_by_depth[0],
2510 	  &G.by_depth[count_old_page_tables],
2511 	  count_new_page_tables * sizeof (void *));
2512   memcpy (&new_by_depth[count_new_page_tables],
2513 	  &G.by_depth[0],
2514 	  count_old_page_tables * sizeof (void *));
2515   memcpy (&new_save_in_use[0],
2516 	  &G.save_in_use[count_old_page_tables],
2517 	  count_new_page_tables * sizeof (void *));
2518   memcpy (&new_save_in_use[count_new_page_tables],
2519 	  &G.save_in_use[0],
2520 	  count_old_page_tables * sizeof (void *));
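  /* E.g. with three old PTEs [O0 O1 O2] and two PCH PTEs appended as
     [O0 O1 O2 N0 N1], the copies above produce [N0 N1 O0 O1 O2]; the
     old entries then start at index count_new_page_tables == 2.  */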
2521 
2522   free (G.by_depth);
2523   free (G.save_in_use);
2524 
2525   G.by_depth = new_by_depth;
2526   G.save_in_use = new_save_in_use;
2527 
2528   /* Now update all the index_by_depth fields.  */
2529   for (i = G.by_depth_in_use; i > 0; --i)
2530     {
2531       page_entry *p = G.by_depth[i-1];
2532       p->index_by_depth = i-1;
2533     }
2534 
2535   /* And last, we update the depth pointers in G.depth.  The first
2536      entry is already 0, and context 0 entries always start at index
2537      0, so there is nothing to update in the first slot.  We need a
2538      second slot only if we have old ptes, and if we do, they start
2539      at index count_new_page_tables.  */
2540   if (count_old_page_tables)
2541     push_depth (count_new_page_tables);
2542 }
2543 
2544 void
2545 ggc_pch_read (FILE *f, void *addr)
2546 {
2547   struct ggc_pch_ondisk d;
2548   unsigned i;
2549   char *offs = (char *) addr;
2550   unsigned long count_old_page_tables;
2551   unsigned long count_new_page_tables;
2552 
2553   count_old_page_tables = G.by_depth_in_use;
2554 
2555   /* We've just read in a PCH file.  So, every object that used to be
2556      allocated is now free.  */
2557   clear_marks ();
2558 #ifdef ENABLE_GC_CHECKING
2559   poison_pages ();
2560 #endif
2561   /* Since we free all the allocated objects, the free list becomes
2562      useless.  Validate it now, which will also clear it.  */
2563   validate_free_objects ();
2564 
2565   /* No object read from a PCH file should ever be freed.  So, set the
2566      context depth to 1, and set the depth of all the currently-allocated
2567      pages to be 1 too.  PCH pages will have depth 0.  */
2568   gcc_assert (!G.context_depth);
2569   G.context_depth = 1;
2570   /* Allocate space for the depth 1 finalizers.  */
2571   G.finalizers.safe_push (vNULL);
2572   G.vec_finalizers.safe_push (vNULL);
2573   gcc_assert (G.finalizers.length() == 2);
2574   for (i = 0; i < NUM_ORDERS; i++)
2575     {
2576       page_entry *p;
2577       for (p = G.pages[i]; p != NULL; p = p->next)
2578 	p->context_depth = G.context_depth;
2579     }
2580 
2581   /* Allocate the appropriate page-table entries for the pages read from
2582      the PCH file.  */
2583   if (fread (&d, sizeof (d), 1, f) != 1)
2584     fatal_error (input_location, "can%'t read PCH file: %m");
2585 
2586   for (i = 0; i < NUM_ORDERS; i++)
2587     {
2588       struct page_entry *entry;
2589       char *pte;
2590       size_t bytes;
2591       size_t num_objs;
2592       size_t j;
2593 
2594       if (d.totals[i] == 0)
2595 	continue;
2596 
2597       bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2598       num_objs = bytes / OBJECT_SIZE (i);
2599       entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2600 					    - sizeof (long)
2601 					    + BITMAP_SIZE (num_objs + 1)));
2602       entry->bytes = bytes;
2603       entry->page = offs;
2604       entry->context_depth = 0;
2605       offs += bytes;
2606       entry->num_free_objects = 0;
2607       entry->order = i;
2608 
2609       for (j = 0;
2610 	   j + HOST_BITS_PER_LONG <= num_objs + 1;
2611 	   j += HOST_BITS_PER_LONG)
2612 	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2613       for (; j < num_objs + 1; j++)
2614 	entry->in_use_p[j / HOST_BITS_PER_LONG]
2615 	  |= 1L << (j % HOST_BITS_PER_LONG);
2616 
2617       for (pte = entry->page;
2618 	   pte < entry->page + entry->bytes;
2619 	   pte += G.pagesize)
2620 	set_page_table_entry (pte, entry);
2621 
2622       if (G.page_tails[i] != NULL)
2623 	G.page_tails[i]->next = entry;
2624       else
2625 	G.pages[i] = entry;
2626       G.page_tails[i] = entry;
2627 
2628       /* We start off by just adding all the new information to the
2629 	 end of the varrays; later, we will move the new information
2630 	 to the front of the varrays, as the PCH page tables are at
2631 	 context 0.  */
2632       push_by_depth (entry, 0);
2633     }
2634 
2635   /* Now, we update the various data structures that speed page table
2636      handling.  */
2637   count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2638 
2639   move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2640 
2641   /* Update the statistics.  */
2642   G.allocated = G.allocated_last_gc = offs - (char *)addr;
2643 }
2644