1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2    Copyright (C) 1999-2013 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "diagnostic-core.h"
28 #include "flags.h"
29 #include "ggc.h"
30 #include "ggc-internal.h"
31 #include "timevar.h"
32 #include "params.h"
33 #include "tree-flow.h"
34 #include "cfgloop.h"
35 #include "plugin.h"
36 
37 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
38    file open.  Prefer either to valloc.  */
39 #ifdef HAVE_MMAP_ANON
40 # undef HAVE_MMAP_DEV_ZERO
41 # define USING_MMAP
42 #endif
43 
44 #ifdef HAVE_MMAP_DEV_ZERO
45 # define USING_MMAP
46 #endif
47 
48 #ifndef USING_MMAP
49 #define USING_MALLOC_PAGE_GROUPS
50 #endif
51 
52 #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
53     && defined(USING_MMAP)
54 # define USING_MADVISE
55 #endif
56 
57 /* Strategy:
58 
59    This garbage-collecting allocator allocates objects on one of a set
60    of pages.  Each page can allocate objects of a single size only;
61    available sizes are powers of two starting at four bytes.  The size
62    of an allocation request is rounded up to the next power of two
63    (`order'), and satisfied from the appropriate page.
64 
65    Each page is recorded in a page-entry, which also maintains an
66    in-use bitmap of object positions on the page.  This allows the
67    allocation state of a particular object to be flipped without
68    touching the page itself.
69 
70    Each page-entry also has a context depth, which is used to track
71    pushing and popping of allocation contexts.  Only objects allocated
72    in the current (highest-numbered) context may be collected.
73 
74    Page entries are arranged in an array of singly-linked lists.  The
75    array is indexed by the allocation size, in bits, of the pages on
76    it; i.e. all pages on a list allocate objects of the same size.
77    Pages are ordered on the list such that all non-full pages precede
78    all full pages, with non-full pages arranged in order of decreasing
79    context depth.
80 
81    Empty pages (of all orders) are kept on a single page cache list,
82    and are considered first when new pages are required; they are
83    deallocated at the start of the next collection if they haven't
84    been recycled by then.  */
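
#if 0
/* Illustrative sketch (standalone, not part of the allocator): how the
   basic power-of-two scheme described above maps a request size to an
   order, ignoring the extra non-power-of-two orders defined further down.
   A 20-byte request is rounded up to a 32-byte object, i.e. order 5.  */
#include <stdio.h>

int
main (void)
{
  size_t request = 20;
  unsigned order = 0;

  while (((size_t) 1 << order) < request)
    order++;
  printf ("order %u, object size %lu\n", order,
	  (unsigned long) ((size_t) 1 << order));
  return 0;
}
#endif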
85 
86 /* Define GGC_DEBUG_LEVEL to print debugging information.
87      0: No debugging output.
88      1: GC statistics only.
89      2: Page-entry allocations/deallocations as well.
90      3: Object allocations as well.
91      4: Object marks as well.  */
92 #define GGC_DEBUG_LEVEL (0)
93 
94 #ifndef HOST_BITS_PER_PTR
95 #define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
96 #endif
97 
98 
99 /* A two-level tree is used to look up the page-entry for a given
100    pointer.  Two chunks of the pointer's bits are extracted to index
101    the first and second levels of the tree, as follows:
102 
103 				   HOST_PAGE_SIZE_BITS
104 			   32		|      |
105        msb +----------------+----+------+------+ lsb
106 			    |    |      |
107 			 PAGE_L1_BITS   |
108 				 |      |
109 			       PAGE_L2_BITS
110 
111    The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
112    pages are aligned on system page boundaries.  The next most
113    significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
114    index values in the lookup table, respectively.
115 
116    For 32-bit architectures and the settings below, there are no
117    leftover bits.  For architectures with wider pointers, the lookup
118    tree points to a list of pages, which must be scanned to find the
119    correct one.  */
120 
121 #define PAGE_L1_BITS	(8)
122 #define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
123 #define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
124 #define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)
125 
126 #define LOOKUP_L1(p) \
127   (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
128 
129 #define LOOKUP_L2(p) \
130   (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
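
#if 0
/* Illustrative sketch (standalone; assumes 4K pages, so G.lg_pagesize == 12
   and PAGE_L2_BITS == 32 - 8 - 12 == 12): how the LOOKUP_* macros above
   carve a 32-bit address into the two table indices.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uintptr_t p = 0x12345678;
  unsigned l1 = (p >> 24) & 0xff;	/* LOOKUP_L1: 0x12 */
  unsigned l2 = (p >> 12) & 0xfff;	/* LOOKUP_L2: 0x345 */

  /* The low 12 bits (0x678) are the offset within the page and are never
     consulted by the lookup tree.  */
  printf ("L1 = %#x, L2 = %#x\n", l1, l2);
  return 0;
}
#endif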
131 
132 /* The number of objects per allocation page, for objects on a page of
133    the indicated ORDER.  */
134 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
135 
136 /* The number of objects in P.  */
137 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
138 
139 /* The size of an object on a page of the indicated ORDER.  */
140 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
141 
142 /* For speed, we avoid doing a general integer divide to locate the
143    offset in the allocation bitmap, by precalculating numbers M, S
144    such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
145    within the page which is evenly divisible by the object size Z.  */
146 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
147 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
148 #define OFFSET_TO_BIT(OFFSET, ORDER) \
149   (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
150 
151 /* We use this structure to determine the alignment required for
152    allocations.  For power-of-two sized allocations, that's not a
153    problem, but it does matter for odd-sized allocations.
154    We do not care about alignment for floating-point types.  */
155 
156 struct max_alignment {
157   char c;
158   union {
159     HOST_WIDEST_INT i;
160     void *p;
161   } u;
162 };
163 
164 /* The biggest alignment required.  */
165 
166 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
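
#if 0
/* Illustrative sketch (standalone; `long long' stands in for
   HOST_WIDEST_INT): the offsetof trick above reports the strictest
   alignment among the union members, because the lone char must be padded
   out to that alignment.  On a typical LP64 host this prints 8.  */
#include <stdio.h>
#include <stddef.h>

struct example_max_alignment {
  char c;
  union {
    long long i;
    void *p;
  } u;
};

int
main (void)
{
  printf ("%lu\n",
	  (unsigned long) offsetof (struct example_max_alignment, u));
  return 0;
}
#endif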
167 
168 
169 /* The number of extra orders, not corresponding to power-of-two sized
170    objects.  */
171 
172 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
173 
174 #define RTL_SIZE(NSLOTS) \
175   (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
176 
177 #define TREE_EXP_SIZE(OPS) \
178   (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
179 
180 /* The Ith entry is the maximum size of an object to be stored in the
181    Ith extra order.  Adding a new entry to this array is the *only*
182    thing you need to do to add a new special allocation size.  */
183 
184 static const size_t extra_order_size_table[] = {
185   /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
186      There are a lot of structures with these sizes and explicitly
187      listing them risks orders being dropped because they changed size.  */
188   MAX_ALIGNMENT * 3,
189   MAX_ALIGNMENT * 5,
190   MAX_ALIGNMENT * 6,
191   MAX_ALIGNMENT * 7,
192   MAX_ALIGNMENT * 9,
193   MAX_ALIGNMENT * 10,
194   MAX_ALIGNMENT * 11,
195   MAX_ALIGNMENT * 12,
196   MAX_ALIGNMENT * 13,
197   MAX_ALIGNMENT * 14,
198   MAX_ALIGNMENT * 15,
199   sizeof (struct tree_decl_non_common),
200   sizeof (struct tree_field_decl),
201   sizeof (struct tree_parm_decl),
202   sizeof (struct tree_var_decl),
203   sizeof (struct tree_type_non_common),
204   sizeof (struct function),
205   sizeof (struct basic_block_def),
206   sizeof (struct cgraph_node),
207   sizeof (struct loop),
208 };
209 
210 /* The total number of orders.  */
211 
212 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
213 
214 /* Compute the smallest nonnegative number which when added to X gives
215    a multiple of F.  */
216 
217 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
218 
219 /* Compute the smallest multiple of F that is >= X.  */
220 
221 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
222 
223 /* Round X up to the next multiple of the page size.  */
224 
225 #define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
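
#if 0
/* Illustrative sketch (standalone; assumes a 4K page): the values the
   rounding macros above produce.  ROUND_UP_VALUE (10, 8) is 6 (10 + 6 is
   the next multiple of 8), ROUND_UP (10, 8) is 16, and PAGE_ALIGN (5000)
   is 8192.  */
#include <stdio.h>

int
main (void)
{
  size_t x = 10, f = 8, pagesize = 4096, n = 5000;

  printf ("%lu %lu %lu\n",
	  (unsigned long) (f - 1 - (f - 1 + x) % f),	/* ROUND_UP_VALUE */
	  (unsigned long) ((x + f - 1) / f * f),	/* ROUND_UP */
	  (unsigned long) ((n + pagesize - 1) & ~(pagesize - 1))); /* PAGE_ALIGN */
  return 0;
}
#endif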
226 
227 /* The Ith entry is the number of objects on a page of order I.  */
228 
229 static unsigned objects_per_page_table[NUM_ORDERS];
230 
231 /* The Ith entry is the size of an object on a page of order I.  */
232 
233 static size_t object_size_table[NUM_ORDERS];
234 
235 /* The Ith entry is a pair of numbers (mult, shift) such that
236    ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
237    for all k evenly divisible by OBJECT_SIZE(I).  */
238 
239 static struct
240 {
241   size_t mult;
242   unsigned int shift;
243 }
244 inverse_table[NUM_ORDERS];
245 
246 /* A page_entry records the status of an allocation page.  This
247    structure is dynamically sized to fit the bitmap in_use_p.  */
248 typedef struct page_entry
249 {
250   /* The next page-entry with objects of the same size, or NULL if
251      this is the last page-entry.  */
252   struct page_entry *next;
253 
254   /* The previous page-entry with objects of the same size, or NULL if
255      this is the first page-entry.   The PREV pointer exists solely to
256      keep the cost of ggc_free manageable.  */
257   struct page_entry *prev;
258 
259   /* The number of bytes allocated.  (This will always be a multiple
260      of the host system page size.)  */
261   size_t bytes;
262 
263   /* The address at which the memory is allocated.  */
264   char *page;
265 
266 #ifdef USING_MALLOC_PAGE_GROUPS
267   /* Back pointer to the page group this page came from.  */
268   struct page_group *group;
269 #endif
270 
271   /* This is the index in the by_depth varray where this page table
272      can be found.  */
273   unsigned long index_by_depth;
274 
275   /* Context depth of this page.  */
276   unsigned short context_depth;
277 
278   /* The number of free objects remaining on this page.  */
279   unsigned short num_free_objects;
280 
281   /* A likely candidate for the bit position of a free object for the
282      next allocation from this page.  */
283   unsigned short next_bit_hint;
284 
285   /* The lg of the size of objects allocated from this page.  */
286   unsigned char order;
287 
288   /* Discarded page? */
289   bool discarded;
290 
291   /* A bit vector indicating whether or not objects are in use.  The
292      Nth bit is one if the Nth object on this page is allocated.  This
293      array is dynamically sized.  */
294   unsigned long in_use_p[1];
295 } page_entry;
296 
297 #ifdef USING_MALLOC_PAGE_GROUPS
298 /* A page_group describes a large allocation from malloc, from which
299    we parcel out aligned pages.  */
300 typedef struct page_group
301 {
302   /* A linked list of all extant page groups.  */
303   struct page_group *next;
304 
305   /* The address we received from malloc.  */
306   char *allocation;
307 
308   /* The size of the block.  */
309   size_t alloc_size;
310 
311   /* A bitmask of pages in use.  */
312   unsigned int in_use;
313 } page_group;
314 #endif
315 
316 #if HOST_BITS_PER_PTR <= 32
317 
318 /* On 32-bit hosts, we use a two level page table, as pictured above.  */
319 typedef page_entry **page_table[PAGE_L1_SIZE];
320 
321 #else
322 
323 /* On 64-bit hosts, we use the same two level page tables plus a linked
324    list that disambiguates the top 32-bits.  There will almost always be
325    exactly one entry in the list.  */
326 typedef struct page_table_chain
327 {
328   struct page_table_chain *next;
329   size_t high_bits;
330   page_entry **table[PAGE_L1_SIZE];
331 } *page_table;
332 
333 #endif
334 
335 #ifdef ENABLE_GC_ALWAYS_COLLECT
336 /* List of free objects to be verified as actually free on the
337    next collection.  */
338 struct free_object
339 {
340   void *object;
341   struct free_object *next;
342 };
343 #endif
344 
345 /* The rest of the global variables.  */
346 static struct globals
347 {
348   /* The Nth element in this array is a page with objects of size 2^N.
349      If there are any pages with free objects, they will be at the
350      head of the list.  NULL if there are no page-entries for this
351      object size.  */
352   page_entry *pages[NUM_ORDERS];
353 
354   /* The Nth element in this array is the last page with objects of
355      size 2^N.  NULL if there are no page-entries for this object
356      size.  */
357   page_entry *page_tails[NUM_ORDERS];
358 
359   /* Lookup table for associating allocation pages with object addresses.  */
360   page_table lookup;
361 
362   /* The system's page size.  */
363   size_t pagesize;
364   size_t lg_pagesize;
365 
366   /* Bytes currently allocated.  */
367   size_t allocated;
368 
369   /* Bytes currently allocated at the end of the last collection.  */
370   size_t allocated_last_gc;
371 
372   /* Total amount of memory mapped.  */
373   size_t bytes_mapped;
374 
375   /* Bit N set if any allocations have been done at context depth N.  */
376   unsigned long context_depth_allocations;
377 
378   /* Bit N set if any collections have been done at context depth N.  */
379   unsigned long context_depth_collections;
380 
381   /* The current depth in the context stack.  */
382   unsigned short context_depth;
383 
384   /* A file descriptor open to /dev/zero for reading.  */
385 #if defined (HAVE_MMAP_DEV_ZERO)
386   int dev_zero_fd;
387 #endif
388 
389   /* A cache of free system pages.  */
390   page_entry *free_pages;
391 
392 #ifdef USING_MALLOC_PAGE_GROUPS
393   page_group *page_groups;
394 #endif
395 
396   /* The file stream used for debugging output.  */
397   FILE *debug_file;
398 
399   /* Current number of elements in use in depth below.  */
400   unsigned int depth_in_use;
401 
402   /* Maximum number of elements that can be used before resizing.  */
403   unsigned int depth_max;
404 
405   /* Each element of this array is an index into by_depth at which the
406      given depth starts.  This array is indexed by the depth we are
407      interested in.  */
408   unsigned int *depth;
409 
410   /* Current number of elements in use in by_depth below.  */
411   unsigned int by_depth_in_use;
412 
413   /* Maximum number of elements that can be used before resizing.  */
414   unsigned int by_depth_max;
415 
416   /* Each element of this array is a pointer to a page_entry; all
417      page_entries can be found in here, ordered by increasing depth.
418      index_by_depth in the page_entry is the index into this data
419      structure where that page_entry can be found.  This is used to
420      speed up finding all page_entries at a particular depth.  */
421   page_entry **by_depth;
422 
423   /* Each element is a pointer to the saved in_use_p bits, if any,
424      zero otherwise.  We allocate them all together, to enable a
425      better runtime data access pattern.  */
426   unsigned long **save_in_use;
427 
428 #ifdef ENABLE_GC_ALWAYS_COLLECT
429   /* List of free objects to be verified as actually free on the
430      next collection.  */
431   struct free_object *free_object_list;
432 #endif
433 
434   struct
435   {
436     /* Total GC-allocated memory.  */
437     unsigned long long total_allocated;
438     /* Total overhead for GC-allocated memory.  */
439     unsigned long long total_overhead;
440 
441     /* Total allocations and overhead for sizes less than 32, 64 and 128.
442        These sizes are interesting because they are typical cache line
443        sizes.  */
444 
445     unsigned long long total_allocated_under32;
446     unsigned long long total_overhead_under32;
447 
448     unsigned long long total_allocated_under64;
449     unsigned long long total_overhead_under64;
450 
451     unsigned long long total_allocated_under128;
452     unsigned long long total_overhead_under128;
453 
454     /* The allocations for each of the allocation orders.  */
455     unsigned long long total_allocated_per_order[NUM_ORDERS];
456 
457     /* The overhead for each of the allocation orders.  */
458     unsigned long long total_overhead_per_order[NUM_ORDERS];
459   } stats;
460 } G;
461 
462 /* The size in bytes required to maintain a bitmap for the objects
463    on a page-entry.  */
464 #define BITMAP_SIZE(Num_objects) \
465   (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
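
#if 0
/* Illustrative sketch (standalone; assumes 64-bit longs): the bitmap size
   alloc_page reserves for a 4K page of 8-byte objects.  It asks for one
   bit more than the object count, for the one-past-the-end sentinel bit.  */
#include <stdio.h>

int
main (void)
{
  size_t num_objects = 4096 / 8;	/* 512 objects per page */
  size_t bits_per_long = 64;
  size_t longs = (num_objects + 1 + bits_per_long - 1) / bits_per_long;

  printf ("%lu longs, %lu bytes\n",
	  (unsigned long) longs, (unsigned long) (longs * 8));	/* 9, 72 */
  return 0;
}
#endif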
466 
467 /* Allocate pages in chunks of this size, to throttle calls to memory
468    allocation routines.  The first page is used, the rest go onto the
469    free list.  This cannot be larger than HOST_BITS_PER_INT for the
470    in_use bitmask for page_group.  Hosts that need a different value
471    can override this by defining GGC_QUIRE_SIZE explicitly.  */
472 #ifndef GGC_QUIRE_SIZE
473 # ifdef USING_MMAP
474 #  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
475 # else
476 #  define GGC_QUIRE_SIZE 16
477 # endif
478 #endif
479 
480 /* Initial guess as to how many page table entries we might need.  */
481 #define INITIAL_PTE_COUNT 128
482 
483 static int ggc_allocated_p (const void *);
484 static page_entry *lookup_page_table_entry (const void *);
485 static void set_page_table_entry (void *, page_entry *);
486 #ifdef USING_MMAP
487 static char *alloc_anon (char *, size_t, bool check);
488 #endif
489 #ifdef USING_MALLOC_PAGE_GROUPS
490 static size_t page_group_index (char *, char *);
491 static void set_page_group_in_use (page_group *, char *);
492 static void clear_page_group_in_use (page_group *, char *);
493 #endif
494 static struct page_entry * alloc_page (unsigned);
495 static void free_page (struct page_entry *);
496 static void release_pages (void);
497 static void clear_marks (void);
498 static void sweep_pages (void);
499 static void ggc_recalculate_in_use_p (page_entry *);
500 static void compute_inverse (unsigned);
501 static inline void adjust_depth (void);
502 static void move_ptes_to_front (int, int);
503 
504 void debug_print_page_list (int);
505 static void push_depth (unsigned int);
506 static void push_by_depth (page_entry *, unsigned long *);
507 
508 /* Push an entry onto G.depth.  */
509 
510 inline static void
511 push_depth (unsigned int i)
512 {
513   if (G.depth_in_use >= G.depth_max)
514     {
515       G.depth_max *= 2;
516       G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
517     }
518   G.depth[G.depth_in_use++] = i;
519 }
520 
521 /* Push an entry onto G.by_depth and G.save_in_use.  */
522 
523 inline static void
524 push_by_depth (page_entry *p, unsigned long *s)
525 {
526   if (G.by_depth_in_use >= G.by_depth_max)
527     {
528       G.by_depth_max *= 2;
529       G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
530       G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
531 				  G.by_depth_max);
532     }
533   G.by_depth[G.by_depth_in_use] = p;
534   G.save_in_use[G.by_depth_in_use++] = s;
535 }
536 
537 #if (GCC_VERSION < 3001)
538 #define prefetch(X) ((void) X)
539 #else
540 #define prefetch(X) __builtin_prefetch (X)
541 #endif
542 
543 #define save_in_use_p_i(__i) \
544   (G.save_in_use[__i])
545 #define save_in_use_p(__p) \
546   (save_in_use_p_i (__p->index_by_depth))
547 
548 /* Returns nonzero if P was allocated in GC'able memory.  */
549 
550 static inline int
551 ggc_allocated_p (const void *p)
552 {
553   page_entry ***base;
554   size_t L1, L2;
555 
556 #if HOST_BITS_PER_PTR <= 32
557   base = &G.lookup[0];
558 #else
559   page_table table = G.lookup;
560   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
561   while (1)
562     {
563       if (table == NULL)
564 	return 0;
565       if (table->high_bits == high_bits)
566 	break;
567       table = table->next;
568     }
569   base = &table->table[0];
570 #endif
571 
572   /* Extract the level 1 and 2 indices.  */
573   L1 = LOOKUP_L1 (p);
574   L2 = LOOKUP_L2 (p);
575 
576   return base[L1] && base[L1][L2];
577 }
578 
579 /* Traverse the page table and find the entry for a page.
580    Die (probably) if the object wasn't allocated via GC.  */
581 
582 static inline page_entry *
583 lookup_page_table_entry (const void *p)
584 {
585   page_entry ***base;
586   size_t L1, L2;
587 
588 #if HOST_BITS_PER_PTR <= 32
589   base = &G.lookup[0];
590 #else
591   page_table table = G.lookup;
592   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
593   while (table->high_bits != high_bits)
594     table = table->next;
595   base = &table->table[0];
596 #endif
597 
598   /* Extract the level 1 and 2 indices.  */
599   L1 = LOOKUP_L1 (p);
600   L2 = LOOKUP_L2 (p);
601 
602   return base[L1][L2];
603 }
604 
605 /* Set the page table entry for a page.  */
606 
607 static void
608 set_page_table_entry (void *p, page_entry *entry)
609 {
610   page_entry ***base;
611   size_t L1, L2;
612 
613 #if HOST_BITS_PER_PTR <= 32
614   base = &G.lookup[0];
615 #else
616   page_table table;
617   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
618   for (table = G.lookup; table; table = table->next)
619     if (table->high_bits == high_bits)
620       goto found;
621 
622   /* Not found -- allocate a new table.  */
623   table = XCNEW (struct page_table_chain);
624   table->next = G.lookup;
625   table->high_bits = high_bits;
626   G.lookup = table;
627 found:
628   base = &table->table[0];
629 #endif
630 
631   /* Extract the level 1 and 2 indices.  */
632   L1 = LOOKUP_L1 (p);
633   L2 = LOOKUP_L2 (p);
634 
635   if (base[L1] == NULL)
636     base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
637 
638   base[L1][L2] = entry;
639 }
640 
641 /* Prints the page-entry for object size ORDER, for debugging.  */
642 
643 DEBUG_FUNCTION void
644 debug_print_page_list (int order)
645 {
646   page_entry *p;
647   printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
648 	  (void *) G.page_tails[order]);
649   p = G.pages[order];
650   while (p != NULL)
651     {
652       printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
653 	      p->num_free_objects);
654       p = p->next;
655     }
656   printf ("NULL\n");
657   fflush (stdout);
658 }
659 
660 #ifdef USING_MMAP
661 /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
662    (if non-null).  The ifdef structure here is intended to cause a
663    compile error unless exactly one of the HAVE_* is defined.  */
664 
665 static inline char *
666 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
667 {
668 #ifdef HAVE_MMAP_ANON
669   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
670 			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
671 #endif
672 #ifdef HAVE_MMAP_DEV_ZERO
673   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
674 			      MAP_PRIVATE, G.dev_zero_fd, 0);
675 #endif
676 
677   if (page == (char *) MAP_FAILED)
678     {
679       if (!check)
680         return NULL;
681       perror ("virtual memory exhausted");
682       exit (FATAL_EXIT_CODE);
683     }
684 
685   /* Remember that we allocated this memory.  */
686   G.bytes_mapped += size;
687 
688   /* Pretend we don't have access to the allocated pages.  We'll enable
689      access to smaller pieces of the area in ggc_internal_alloc.  Discard the
690      handle to avoid handle leak.  */
691   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
692 
693   return page;
694 }
695 #endif
696 #ifdef USING_MALLOC_PAGE_GROUPS
697 /* Compute the index for this page into the page group.  */
698 
699 static inline size_t
700 page_group_index (char *allocation, char *page)
701 {
702   return (size_t) (page - allocation) >> G.lg_pagesize;
703 }
704 
705 /* Set and clear the in_use bit for this page in the page group.  */
706 
707 static inline void
708 set_page_group_in_use (page_group *group, char *page)
709 {
710   group->in_use |= 1 << page_group_index (group->allocation, page);
711 }
712 
713 static inline void
714 clear_page_group_in_use (page_group *group, char *page)
715 {
716   group->in_use &= ~(1 << page_group_index (group->allocation, page));
717 }
718 #endif
719 
720 /* Allocate a new page for allocating objects of size 2^ORDER,
721    and return an entry for it.  The entry is not added to the
722    appropriate page_table list.  */
723 
724 static inline struct page_entry *
725 alloc_page (unsigned order)
726 {
727   struct page_entry *entry, *p, **pp;
728   char *page;
729   size_t num_objects;
730   size_t bitmap_size;
731   size_t page_entry_size;
732   size_t entry_size;
733 #ifdef USING_MALLOC_PAGE_GROUPS
734   page_group *group;
735 #endif
736 
737   num_objects = OBJECTS_PER_PAGE (order);
738   bitmap_size = BITMAP_SIZE (num_objects + 1);
739   page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
740   entry_size = num_objects * OBJECT_SIZE (order);
741   if (entry_size < G.pagesize)
742     entry_size = G.pagesize;
743   entry_size = PAGE_ALIGN (entry_size);
744 
745   entry = NULL;
746   page = NULL;
747 
748   /* Check the list of free pages for one we can use.  */
749   for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
750     if (p->bytes == entry_size)
751       break;
752 
753   if (p != NULL)
754     {
755       if (p->discarded)
756         G.bytes_mapped += p->bytes;
757       p->discarded = false;
758 
759       /* Recycle the allocated memory from this page ...  */
760       *pp = p->next;
761       page = p->page;
762 
763 #ifdef USING_MALLOC_PAGE_GROUPS
764       group = p->group;
765 #endif
766 
767       /* ... and, if possible, the page entry itself.  */
768       if (p->order == order)
769 	{
770 	  entry = p;
771 	  memset (entry, 0, page_entry_size);
772 	}
773       else
774 	free (p);
775     }
776 #ifdef USING_MMAP
777   else if (entry_size == G.pagesize)
778     {
779       /* We want just one page.  Allocate a bunch of them and put the
780 	 extras on the freelist.  (Can only do this optimization with
781 	 mmap for backing store.)  */
782       struct page_entry *e, *f = G.free_pages;
783       int i, entries = GGC_QUIRE_SIZE;
784 
785       page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
786       if (page == NULL)
787 	{
788 	  page = alloc_anon (NULL, G.pagesize, true);
789           entries = 1;
790 	}
791 
792       /* This loop counts down so that the chain will be in ascending
793 	 memory order.  */
794       for (i = entries - 1; i >= 1; i--)
795 	{
796 	  e = XCNEWVAR (struct page_entry, page_entry_size);
797 	  e->order = order;
798 	  e->bytes = G.pagesize;
799 	  e->page = page + (i << G.lg_pagesize);
800 	  e->next = f;
801 	  f = e;
802 	}
803 
804       G.free_pages = f;
805     }
806   else
807     page = alloc_anon (NULL, entry_size, true);
808 #endif
809 #ifdef USING_MALLOC_PAGE_GROUPS
810   else
811     {
812       /* Allocate a large block of memory and serve out the aligned
813 	 pages therein.  This results in much less memory wastage
814 	 than the traditional implementation of valloc.  */
815 
816       char *allocation, *a, *enda;
817       size_t alloc_size, head_slop, tail_slop;
818       int multiple_pages = (entry_size == G.pagesize);
819 
820       if (multiple_pages)
821 	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
822       else
823 	alloc_size = entry_size + G.pagesize - 1;
824       allocation = XNEWVEC (char, alloc_size);
825 
826       page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
827       head_slop = page - allocation;
828       if (multiple_pages)
829 	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
830       else
831 	tail_slop = alloc_size - entry_size - head_slop;
832       enda = allocation + alloc_size - tail_slop;
833 
834       /* We allocated N pages, which are likely not aligned, leaving
835 	 us with N-1 usable pages.  We plan to place the page_group
836 	 structure somewhere in the slop.  */
837       if (head_slop >= sizeof (page_group))
838 	group = (page_group *)page - 1;
839       else
840 	{
841 	  /* We magically got an aligned allocation.  Too bad, we have
842 	     to waste a page anyway.  */
843 	  if (tail_slop == 0)
844 	    {
845 	      enda -= G.pagesize;
846 	      tail_slop += G.pagesize;
847 	    }
848 	  gcc_assert (tail_slop >= sizeof (page_group));
849 	  group = (page_group *)enda;
850 	  tail_slop -= sizeof (page_group);
851 	}
852 
853       /* Remember that we allocated this memory.  */
854       group->next = G.page_groups;
855       group->allocation = allocation;
856       group->alloc_size = alloc_size;
857       group->in_use = 0;
858       G.page_groups = group;
859       G.bytes_mapped += alloc_size;
860 
861       /* If we allocated multiple pages, put the rest on the free list.  */
862       if (multiple_pages)
863 	{
864 	  struct page_entry *e, *f = G.free_pages;
865 	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
866 	    {
867 	      e = XCNEWVAR (struct page_entry, page_entry_size);
868 	      e->order = order;
869 	      e->bytes = G.pagesize;
870 	      e->page = a;
871 	      e->group = group;
872 	      e->next = f;
873 	      f = e;
874 	    }
875 	  G.free_pages = f;
876 	}
877     }
878 #endif
879 
880   if (entry == NULL)
881     entry = XCNEWVAR (struct page_entry, page_entry_size);
882 
883   entry->bytes = entry_size;
884   entry->page = page;
885   entry->context_depth = G.context_depth;
886   entry->order = order;
887   entry->num_free_objects = num_objects;
888   entry->next_bit_hint = 1;
889 
890   G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
891 
892 #ifdef USING_MALLOC_PAGE_GROUPS
893   entry->group = group;
894   set_page_group_in_use (group, page);
895 #endif
896 
897   /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
898      increment the hint.  */
899   entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
900     = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
901 
902   set_page_table_entry (page, entry);
903 
904   if (GGC_DEBUG_LEVEL >= 2)
905     fprintf (G.debug_file,
906 	     "Allocating page at %p, object size=%lu, data %p-%p\n",
907 	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
908 	     page + entry_size - 1);
909 
910   return entry;
911 }
912 
913 /* Adjust the size of G.depth so that no index greater than the one
914    used by the top of the G.by_depth is used.  */
915 
916 static inline void
917 adjust_depth (void)
918 {
919   page_entry *top;
920 
921   if (G.by_depth_in_use)
922     {
923       top = G.by_depth[G.by_depth_in_use-1];
924 
925       /* Peel back indices in depth that index into by_depth, so that
926 	 as new elements are added to by_depth, we note the indices
927 	 of those elements, if they are for new context depths.  */
928       while (G.depth_in_use > (size_t)top->context_depth+1)
929 	--G.depth_in_use;
930     }
931 }
932 
933 /* For a page that is no longer needed, put it on the free page list.  */
934 
935 static void
936 free_page (page_entry *entry)
937 {
938   if (GGC_DEBUG_LEVEL >= 2)
939     fprintf (G.debug_file,
940 	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
941 	     entry->page, entry->page + entry->bytes - 1);
942 
943   /* Mark the page as inaccessible.  Discard the handle to avoid handle
944      leak.  */
945   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
946 
947   set_page_table_entry (entry->page, NULL);
948 
949 #ifdef USING_MALLOC_PAGE_GROUPS
950   clear_page_group_in_use (entry->group, entry->page);
951 #endif
952 
953   if (G.by_depth_in_use > 1)
954     {
955       page_entry *top = G.by_depth[G.by_depth_in_use-1];
956       int i = entry->index_by_depth;
957 
958       /* We cannot free a page from a context deeper than the current
959 	 one.  */
960       gcc_assert (entry->context_depth == top->context_depth);
961 
962       /* Put top element into freed slot.  */
963       G.by_depth[i] = top;
964       G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
965       top->index_by_depth = i;
966     }
967   --G.by_depth_in_use;
968 
969   adjust_depth ();
970 
971   entry->next = G.free_pages;
972   G.free_pages = entry;
973 }
974 
975 /* Release the free page cache to the system.  */
976 
977 static void
978 release_pages (void)
979 {
980 #ifdef USING_MADVISE
981   page_entry *p, *start_p;
982   char *start;
983   size_t len;
984   size_t mapped_len;
985   page_entry *next, *prev, *newprev;
986   size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
987 
988   /* First free larger contiguous areas to the OS.
989      This allows other allocators to grab these areas if needed.
990      This is only done on larger chunks to avoid fragmentation.
991      This does not always work because the free_pages list is only
992      approximately sorted. */
993 
994   p = G.free_pages;
995   prev = NULL;
996   while (p)
997     {
998       start = p->page;
999       start_p = p;
1000       len = 0;
1001       mapped_len = 0;
1002       newprev = prev;
1003       while (p && p->page == start + len)
1004         {
1005           len += p->bytes;
1006 	  if (!p->discarded)
1007 	      mapped_len += p->bytes;
1008 	  newprev = p;
1009           p = p->next;
1010         }
1011       if (len >= free_unit)
1012         {
1013           while (start_p != p)
1014             {
1015               next = start_p->next;
1016               free (start_p);
1017               start_p = next;
1018             }
1019           munmap (start, len);
1020 	  if (prev)
1021 	    prev->next = p;
1022           else
1023             G.free_pages = p;
1024           G.bytes_mapped -= mapped_len;
1025 	  continue;
1026         }
1027       prev = newprev;
1028    }
1029 
1030   /* Now give back the fragmented pages to the OS, but keep the address
1031      space to reuse it next time. */
1032 
1033   for (p = G.free_pages; p; )
1034     {
1035       if (p->discarded)
1036         {
1037           p = p->next;
1038           continue;
1039         }
1040       start = p->page;
1041       len = p->bytes;
1042       start_p = p;
1043       p = p->next;
1044       while (p && p->page == start + len)
1045         {
1046           len += p->bytes;
1047           p = p->next;
1048         }
1049       /* Give the page back to the kernel, but don't free the mapping.
1050          This avoids fragmentation in the virtual memory map of the
1051  	 process. Next time we can reuse it by just touching it. */
1052       madvise (start, len, MADV_DONTNEED);
1053       /* Don't count those pages as mapped, so that the garbage collector
1054          is not triggered unnecessarily. */
1055       G.bytes_mapped -= len;
1056       while (start_p != p)
1057         {
1058           start_p->discarded = true;
1059           start_p = start_p->next;
1060         }
1061     }
1062 #endif
1063 #if defined(USING_MMAP) && !defined(USING_MADVISE)
1064   page_entry *p, *next;
1065   char *start;
1066   size_t len;
1067 
1068   /* Gather up adjacent pages so they are unmapped together.  */
1069   p = G.free_pages;
1070 
1071   while (p)
1072     {
1073       start = p->page;
1074       next = p->next;
1075       len = p->bytes;
1076       free (p);
1077       p = next;
1078 
1079       while (p && p->page == start + len)
1080 	{
1081 	  next = p->next;
1082 	  len += p->bytes;
1083 	  free (p);
1084 	  p = next;
1085 	}
1086 
1087       munmap (start, len);
1088       G.bytes_mapped -= len;
1089     }
1090 
1091   G.free_pages = NULL;
1092 #endif
1093 #ifdef USING_MALLOC_PAGE_GROUPS
1094   page_entry **pp, *p;
1095   page_group **gp, *g;
1096 
1097   /* Remove all pages from free page groups from the list.  */
1098   pp = &G.free_pages;
1099   while ((p = *pp) != NULL)
1100     if (p->group->in_use == 0)
1101       {
1102 	*pp = p->next;
1103 	free (p);
1104       }
1105     else
1106       pp = &p->next;
1107 
1108   /* Remove all free page groups, and release the storage.  */
1109   gp = &G.page_groups;
1110   while ((g = *gp) != NULL)
1111     if (g->in_use == 0)
1112       {
1113 	*gp = g->next;
1114 	G.bytes_mapped -= g->alloc_size;
1115 	free (g->allocation);
1116       }
1117     else
1118       gp = &g->next;
1119 #endif
1120 }
1121 
1122 /* This table provides a fast way to determine ceil(log_2(size)) for
1123    allocation requests.  The minimum allocation size is eight bytes.  */
1124 #define NUM_SIZE_LOOKUP 512
1125 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1126 {
1127   3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1128   4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1129   5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1130   6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1131   6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1132   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1133   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1134   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1135   7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1136   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1137   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1138   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1139   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1140   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1141   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1142   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1143   8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1144   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1145   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1146   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1147   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1148   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1149   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1150   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1151   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1152   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1153   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1154   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1155   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1156   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1157   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1158   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1159 };
1160 
1161 /* For a given size of memory requested for allocation, return the
1162    actual size that is going to be allocated, as well as the size
1163    order.  */
1164 
1165 static void
1166 ggc_round_alloc_size_1 (size_t requested_size,
1167 			size_t *size_order,
1168 			size_t *alloced_size)
1169 {
1170   size_t order, object_size;
1171 
1172   if (requested_size < NUM_SIZE_LOOKUP)
1173     {
1174       order = size_lookup[requested_size];
1175       object_size = OBJECT_SIZE (order);
1176     }
1177   else
1178     {
1179       order = 10;
1180       while (requested_size > (object_size = OBJECT_SIZE (order)))
1181         order++;
1182     }
1183 
1184   if (size_order)
1185     *size_order = order;
1186   if (alloced_size)
1187     *alloced_size = object_size;
1188 }
1189 
1190 /* For a given size of memory requested for allocation, return the
1191    actual size that is going to be allocated.  */
1192 
1193 size_t
1194 ggc_round_alloc_size (size_t requested_size)
1195 {
1196   size_t size = 0;
1197 
1198   ggc_round_alloc_size_1 (requested_size, NULL, &size);
1199   return size;
1200 }
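
#if 0
/* Illustrative sketch of what a caller sees.  On a host where MAX_ALIGNMENT
   is 8, a 20-byte request falls into the extra 24-byte order
   (MAX_ALIGNMENT * 3) rather than the 32-byte power-of-two order, so the
   rounded size would be 24.  */
size_t actual_size = ggc_round_alloc_size (20);
#endif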
1201 
1202 /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
1203 
1204 void *
1205 ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
1206 {
1207   size_t order, word, bit, object_offset, object_size;
1208   struct page_entry *entry;
1209   void *result;
1210 
1211   ggc_round_alloc_size_1 (size, &order, &object_size);
1212 
1213   /* If there are non-full pages for this size allocation, they are at
1214      the head of the list.  */
1215   entry = G.pages[order];
1216 
1217   /* If there is no page for this object size, or all pages in this
1218      context are full, allocate a new page.  */
1219   if (entry == NULL || entry->num_free_objects == 0)
1220     {
1221       struct page_entry *new_entry;
1222       new_entry = alloc_page (order);
1223 
1224       new_entry->index_by_depth = G.by_depth_in_use;
1225       push_by_depth (new_entry, 0);
1226 
1227       /* We can skip context depths, if we do, make sure we go all the
1228 	 way to the new depth.  */
1229       while (new_entry->context_depth >= G.depth_in_use)
1230 	push_depth (G.by_depth_in_use-1);
1231 
1232       /* If this is the only entry, it's also the tail.  If it is not
1233 	 the only entry, then we must update the PREV pointer of the
1234 	 ENTRY (G.pages[order]) to point to our new page entry.  */
1235       if (entry == NULL)
1236 	G.page_tails[order] = new_entry;
1237       else
1238 	entry->prev = new_entry;
1239 
1240       /* Put new pages at the head of the page list.  By definition the
1241 	 entry at the head of the list always has a NULL PREV pointer.  */
1242       new_entry->next = entry;
1243       new_entry->prev = NULL;
1244       entry = new_entry;
1245       G.pages[order] = new_entry;
1246 
1247       /* For a new page, we know the word and bit positions (in the
1248 	 in_use bitmap) of the first available object -- they're zero.  */
1249       new_entry->next_bit_hint = 1;
1250       word = 0;
1251       bit = 0;
1252       object_offset = 0;
1253     }
1254   else
1255     {
1256       /* First try to use the hint left from the previous allocation
1257 	 to locate a clear bit in the in-use bitmap.  We've made sure
1258 	 that the one-past-the-end bit is always set, so if the hint
1259 	 has run over, this test will fail.  */
1260       unsigned hint = entry->next_bit_hint;
1261       word = hint / HOST_BITS_PER_LONG;
1262       bit = hint % HOST_BITS_PER_LONG;
1263 
1264       /* If the hint didn't work, scan the bitmap from the beginning.  */
1265       if ((entry->in_use_p[word] >> bit) & 1)
1266 	{
1267 	  word = bit = 0;
1268 	  while (~entry->in_use_p[word] == 0)
1269 	    ++word;
1270 
1271 #if GCC_VERSION >= 3004
1272 	  bit = __builtin_ctzl (~entry->in_use_p[word]);
1273 #else
1274 	  while ((entry->in_use_p[word] >> bit) & 1)
1275 	    ++bit;
1276 #endif
1277 
1278 	  hint = word * HOST_BITS_PER_LONG + bit;
1279 	}
1280 
1281       /* Next time, try the next bit.  */
1282       entry->next_bit_hint = hint + 1;
1283 
1284       object_offset = hint * object_size;
1285     }
1286 
1287   /* Set the in-use bit.  */
1288   entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1289 
1290   /* Keep a running total of the number of free objects.  If this page
1291      fills up, we may have to move it to the end of the list if the
1292      next page isn't full.  If the next page is full, all subsequent
1293      pages are full, so there's no need to move it.  */
1294   if (--entry->num_free_objects == 0
1295       && entry->next != NULL
1296       && entry->next->num_free_objects > 0)
1297     {
1298       /* We have a new head for the list.  */
1299       G.pages[order] = entry->next;
1300 
1301       /* We are moving ENTRY to the end of the page table list.
1302 	 The new page at the head of the list will have NULL in
1303 	 its PREV field and ENTRY will have NULL in its NEXT field.  */
1304       entry->next->prev = NULL;
1305       entry->next = NULL;
1306 
1307       /* Append ENTRY to the tail of the list.  */
1308       entry->prev = G.page_tails[order];
1309       G.page_tails[order]->next = entry;
1310       G.page_tails[order] = entry;
1311     }
1312 
1313   /* Calculate the object's address.  */
1314   result = entry->page + object_offset;
1315   if (GATHER_STATISTICS)
1316     ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1317 			 result FINAL_PASS_MEM_STAT);
1318 
1319 #ifdef ENABLE_GC_CHECKING
1320   /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1321      exact same semantics in the presence of memory bugs, regardless of
1322      ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
1323      handle to avoid handle leak.  */
1324   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1325 
1326   /* `Poison' the entire allocated object, including any padding at
1327      the end.  */
1328   memset (result, 0xaf, object_size);
1329 
1330   /* Make the bytes after the end of the object inaccessible.  Discard the
1331      handle to avoid handle leak.  */
1332   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1333 						object_size - size));
1334 #endif
1335 
1336   /* Tell Valgrind that the memory is there, but its content isn't
1337      defined.  The bytes at the end of the object are still marked
1338      inaccessible.  */
1339   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1340 
1341   /* Keep track of how many bytes are being allocated.  This
1342      information is used in deciding when to collect.  */
1343   G.allocated += object_size;
1344 
1345   /* For timevar statistics.  */
1346   timevar_ggc_mem_total += object_size;
1347 
1348   if (GATHER_STATISTICS)
1349     {
1350       size_t overhead = object_size - size;
1351 
1352       G.stats.total_overhead += overhead;
1353       G.stats.total_allocated += object_size;
1354       G.stats.total_overhead_per_order[order] += overhead;
1355       G.stats.total_allocated_per_order[order] += object_size;
1356 
1357       if (size <= 32)
1358 	{
1359 	  G.stats.total_overhead_under32 += overhead;
1360 	  G.stats.total_allocated_under32 += object_size;
1361 	}
1362       if (size <= 64)
1363 	{
1364 	  G.stats.total_overhead_under64 += overhead;
1365 	  G.stats.total_allocated_under64 += object_size;
1366 	}
1367       if (size <= 128)
1368 	{
1369 	  G.stats.total_overhead_under128 += overhead;
1370 	  G.stats.total_allocated_under128 += object_size;
1371 	}
1372     }
1373 
1374   if (GGC_DEBUG_LEVEL >= 3)
1375     fprintf (G.debug_file,
1376 	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1377 	     (unsigned long) size, (unsigned long) object_size, result,
1378 	     (void *) entry);
1379 
1380   return result;
1381 }
1382 
1383 /* Mark function for strings.  */
1384 
1385 void
1386 gt_ggc_m_S (const void *p)
1387 {
1388   page_entry *entry;
1389   unsigned bit, word;
1390   unsigned long mask;
1391   unsigned long offset;
1392 
1393   if (!p || !ggc_allocated_p (p))
1394     return;
1395 
1396   /* Look up the page on which the object is allocated.  */
1397   entry = lookup_page_table_entry (p);
1398   gcc_assert (entry);
1399 
1400   /* Calculate the index of the object on the page; this is its bit
1401      position in the in_use_p bitmap.  Note that because a char* might
1402      point to the middle of an object, we need special code here to
1403      make sure P points to the start of an object.  */
1404   offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1405   if (offset)
1406     {
1407       /* Here we've seen a char* which does not point to the beginning
1408 	 of an allocated object.  We assume it points to the middle of
1409 	 a STRING_CST.  */
1410       gcc_assert (offset == offsetof (struct tree_string, str));
1411       p = ((const char *) p) - offset;
1412       gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1413       return;
1414     }
1415 
1416   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1417   word = bit / HOST_BITS_PER_LONG;
1418   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1419 
1420   /* If the bit was previously set, skip it.  */
1421   if (entry->in_use_p[word] & mask)
1422     return;
1423 
1424   /* Otherwise set it, and decrement the free object count.  */
1425   entry->in_use_p[word] |= mask;
1426   entry->num_free_objects -= 1;
1427 
1428   if (GGC_DEBUG_LEVEL >= 4)
1429     fprintf (G.debug_file, "Marking %p\n", p);
1430 
1431   return;
1432 }
1433 
1434 
1435 /* User-callable entry points for marking string X.  */
1436 
1437 void
1438 gt_ggc_mx (const char *& x)
1439 {
1440   gt_ggc_m_S (x);
1441 }
1442 
1443 void
1444 gt_ggc_mx (unsigned char *& x)
1445 {
1446   gt_ggc_m_S (x);
1447 }
1448 
1449 void
1450 gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1451 {
1452 }
1453 
1454 /* If P is not marked, mark it and return false.  Otherwise return true.
1455    P must have been allocated by the GC allocator; it mustn't point to
1456    static objects, stack variables, or memory allocated with malloc.  */
1457 
1458 int
1459 ggc_set_mark (const void *p)
1460 {
1461   page_entry *entry;
1462   unsigned bit, word;
1463   unsigned long mask;
1464 
1465   /* Look up the page on which the object is alloced.  If the object
1466      wasn't allocated by the collector, we'll probably die.  */
1467   entry = lookup_page_table_entry (p);
1468   gcc_assert (entry);
1469 
1470   /* Calculate the index of the object on the page; this is its bit
1471      position in the in_use_p bitmap.  */
1472   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1473   word = bit / HOST_BITS_PER_LONG;
1474   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1475 
1476   /* If the bit was previously set, skip it.  */
1477   if (entry->in_use_p[word] & mask)
1478     return 1;
1479 
1480   /* Otherwise set it, and decrement the free object count.  */
1481   entry->in_use_p[word] |= mask;
1482   entry->num_free_objects -= 1;
1483 
1484   if (GGC_DEBUG_LEVEL >= 4)
1485     fprintf (G.debug_file, "Marking %p\n", p);
1486 
1487   return 0;
1488 }
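
#if 0
/* Illustrative sketch (a hypothetical marker, not generated code): the
   intended use of ggc_set_mark is to recurse into an object's fields only
   on the first visit, so already-marked objects are never traversed
   again.  */
struct example_node { struct example_node *left, *right; };

static void
example_mark_node (struct example_node *n)
{
  if (n == NULL || ggc_set_mark (n))
    return;			/* NULL, or marked on an earlier visit.  */
  example_mark_node (n->left);	/* First visit: mark the children too.  */
  example_mark_node (n->right);
}
#endif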
1489 
1490 /* Return 1 if P has been marked, zero otherwise.
1491    P must have been allocated by the GC allocator; it mustn't point to
1492    static objects, stack variables, or memory allocated with malloc.  */
1493 
1494 int
1495 ggc_marked_p (const void *p)
1496 {
1497   page_entry *entry;
1498   unsigned bit, word;
1499   unsigned long mask;
1500 
1501   /* Look up the page on which the object is alloced.  If the object
1502      wasn't allocated by the collector, we'll probably die.  */
1503   entry = lookup_page_table_entry (p);
1504   gcc_assert (entry);
1505 
1506   /* Calculate the index of the object on the page; this is its bit
1507      position in the in_use_p bitmap.  */
1508   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1509   word = bit / HOST_BITS_PER_LONG;
1510   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1511 
1512   return (entry->in_use_p[word] & mask) != 0;
1513 }
1514 
1515 /* Return the size of the gc-able object P.  */
1516 
1517 size_t
1518 ggc_get_size (const void *p)
1519 {
1520   page_entry *pe = lookup_page_table_entry (p);
1521   return OBJECT_SIZE (pe->order);
1522 }
1523 
1524 /* Release the memory for object P.  */
1525 
1526 void
1527 ggc_free (void *p)
1528 {
1529   page_entry *pe = lookup_page_table_entry (p);
1530   size_t order = pe->order;
1531   size_t size = OBJECT_SIZE (order);
1532 
1533   if (GATHER_STATISTICS)
1534     ggc_free_overhead (p);
1535 
1536   if (GGC_DEBUG_LEVEL >= 3)
1537     fprintf (G.debug_file,
1538 	     "Freeing object, actual size=%lu, at %p on %p\n",
1539 	     (unsigned long) size, p, (void *) pe);
1540 
1541 #ifdef ENABLE_GC_CHECKING
1542   /* Poison the data, to indicate the data is garbage.  */
1543   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1544   memset (p, 0xa5, size);
1545 #endif
1546   /* Let valgrind know the object is free.  */
1547   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1548 
1549 #ifdef ENABLE_GC_ALWAYS_COLLECT
1550   /* In the completely-anal-checking mode, we do *not* immediately free
1551      the data, but instead verify that the data is *actually* not
1552      reachable the next time we collect.  */
1553   {
1554     struct free_object *fo = XNEW (struct free_object);
1555     fo->object = p;
1556     fo->next = G.free_object_list;
1557     G.free_object_list = fo;
1558   }
1559 #else
1560   {
1561     unsigned int bit_offset, word, bit;
1562 
1563     G.allocated -= size;
1564 
1565     /* Mark the object not-in-use.  */
1566     bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1567     word = bit_offset / HOST_BITS_PER_LONG;
1568     bit = bit_offset % HOST_BITS_PER_LONG;
1569     pe->in_use_p[word] &= ~(1UL << bit);
1570 
1571     if (pe->num_free_objects++ == 0)
1572       {
1573 	page_entry *p, *q;
1574 
1575 	/* If the page is completely full, then it's supposed to
1576 	   be after all pages that aren't.  Since we've freed one
1577 	   object from a page that was full, we need to move the
1578 	   page to the head of the list.
1579 
1580 	   PE is the node we want to move.  Q is the previous node
1581 	   and P is the next node in the list.  */
1582 	q = pe->prev;
1583 	if (q && q->num_free_objects == 0)
1584 	  {
1585 	    p = pe->next;
1586 
1587 	    q->next = p;
1588 
1589 	    /* If PE was at the end of the list, then Q becomes the
1590 	       new end of the list.  If PE was not the end of the
1591 	       list, then we need to update the PREV field for P.  */
1592 	    if (!p)
1593 	      G.page_tails[order] = q;
1594 	    else
1595 	      p->prev = q;
1596 
1597 	    /* Move PE to the head of the list.  */
1598 	    pe->next = G.pages[order];
1599 	    pe->prev = NULL;
1600 	    G.pages[order]->prev = pe;
1601 	    G.pages[order] = pe;
1602 	  }
1603 
1604 	/* Reset the hint bit to point to the only free object.  */
1605 	pe->next_bit_hint = bit_offset;
1606       }
1607   }
1608 #endif
1609 }
1610 
1611 /* Subroutine of init_ggc which computes the pair of numbers used to
1612    perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1613 
1614    This algorithm is taken from Granlund and Montgomery's paper
1615    "Division by Invariant Integers using Multiplication"
1616    (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1617    constants).  */
1618 
1619 static void
1620 compute_inverse (unsigned order)
1621 {
1622   size_t size, inv;
1623   unsigned int e;
1624 
1625   size = OBJECT_SIZE (order);
1626   e = 0;
1627   while (size % 2 == 0)
1628     {
1629       e++;
1630       size >>= 1;
1631     }
1632 
1633   inv = size;
1634   while (inv * size != 1)
1635     inv = inv * (2 - inv*size);
1636 
1637   DIV_MULT (order) = inv;
1638   DIV_SHIFT (order) = e;
1639 }
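
#if 0
/* Illustrative sketch (standalone): the exact-division trick, worked for
   an object size of 24.  24 == 3 << 3, so the shift is 3 and the
   multiplier is the inverse of 3 modulo 2^N, found by the same Newton
   iteration compute_inverse uses above.  */
#include <stdio.h>

int
main (void)
{
  size_t size = 24, inv;
  unsigned int e = 0;
  size_t offset = 7 * 24;	/* the eighth object on a page */

  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }
  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv * size);

  printf ("%lu\n", (unsigned long) ((offset * inv) >> e));	/* prints 7 */
  return 0;
}
#endif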
1640 
1641 /* Initialize the ggc-mmap allocator.  */
1642 void
1643 init_ggc (void)
1644 {
1645   unsigned order;
1646 
1647   G.pagesize = getpagesize();
1648   G.lg_pagesize = exact_log2 (G.pagesize);
1649 
1650 #ifdef HAVE_MMAP_DEV_ZERO
1651   G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1652   if (G.dev_zero_fd == -1)
1653     internal_error ("open /dev/zero: %m");
1654 #endif
1655 
1656 #if 0
1657   G.debug_file = fopen ("ggc-mmap.debug", "w");
1658 #else
1659   G.debug_file = stdout;
1660 #endif
1661 
1662 #ifdef USING_MMAP
1663   /* StunOS has an amazing off-by-one error for the first mmap allocation
1664      after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1665      believe, is an unaligned page allocation, which would cause us to
1666      hork badly if we tried to use it.  */
1667   {
1668     char *p = alloc_anon (NULL, G.pagesize, true);
1669     struct page_entry *e;
1670     if ((uintptr_t)p & (G.pagesize - 1))
1671       {
1672 	/* How losing.  Discard this one and try another.  If we still
1673 	   can't get something useful, give up.  */
1674 
1675 	p = alloc_anon (NULL, G.pagesize, true);
1676 	gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1677       }
1678 
1679     /* We have a good page, might as well hold onto it...  */
1680     e = XCNEW (struct page_entry);
1681     e->bytes = G.pagesize;
1682     e->page = p;
1683     e->next = G.free_pages;
1684     G.free_pages = e;
1685   }
1686 #endif
1687 
1688   /* Initialize the object size table.  */
1689   for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1690     object_size_table[order] = (size_t) 1 << order;
1691   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1692     {
1693       size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1694 
1695       /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1696 	 so that we're sure of getting aligned memory.  */
1697       s = ROUND_UP (s, MAX_ALIGNMENT);
1698       object_size_table[order] = s;
1699     }
1700 
1701   /* Initialize the objects-per-page and inverse tables.  */
1702   for (order = 0; order < NUM_ORDERS; ++order)
1703     {
1704       objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1705       if (objects_per_page_table[order] == 0)
1706 	objects_per_page_table[order] = 1;
1707       compute_inverse (order);
1708     }
1709 
1710   /* Reset the size_lookup array to put appropriately sized objects in
1711      the special orders.  All objects bigger than the previous power
1712      of two, but no greater than the special size, should go in the
1713      new order.  */
1714   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1715     {
1716       int o;
1717       int i;
1718 
1719       i = OBJECT_SIZE (order);
1720       if (i >= NUM_SIZE_LOOKUP)
1721 	continue;
1722 
1723       for (o = size_lookup[i]; o == size_lookup [i]; --i)
1724 	size_lookup[i] = order;
1725     }
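  /* For illustration, suppose a hypothetical special order with
     OBJECT_SIZE == 24 and no other special order below 32: before the loop
     above, sizes 17..32 all mapped to the 32-byte power-of-two order, so
     the walk rewrites size_lookup[24] down through size_lookup[17] to the
     24-byte order and stops at size_lookup[16], which already maps to the
     16-byte order.  */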
1726 
1727   G.depth_in_use = 0;
1728   G.depth_max = 10;
1729   G.depth = XNEWVEC (unsigned int, G.depth_max);
1730 
1731   G.by_depth_in_use = 0;
1732   G.by_depth_max = INITIAL_PTE_COUNT;
1733   G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1734   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1735 }
1736 
1737 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1738    reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
1739 
1740 static void
1741 ggc_recalculate_in_use_p (page_entry *p)
1742 {
1743   unsigned int i;
1744   size_t num_objects;
1745 
1746   /* Because the past-the-end bit in in_use_p is always set, we
1747      pretend there is one additional object.  */
1748   num_objects = OBJECTS_IN_PAGE (p) + 1;
1749 
1750   /* Reset the free object count.  */
1751   p->num_free_objects = num_objects;
1752 
1753   /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
1754   for (i = 0;
1755        i < CEIL (BITMAP_SIZE (num_objects),
1756 		 sizeof (*p->in_use_p));
1757        ++i)
1758     {
1759       unsigned long j;
1760 
1761       /* Something is in use if it is marked, or if it was in use in a
1762 	 context further down the context stack.  */
1763       p->in_use_p[i] |= save_in_use_p (p)[i];
1764 
1765       /* Decrement the free object count for every object allocated.  */
1766       for (j = p->in_use_p[i]; j; j >>= 1)
1767 	p->num_free_objects -= (j & 1);
1768     }
1769 
1770   gcc_assert (p->num_free_objects < num_objects);
1771 }
1772 
1773 /* Unmark all objects.  */
1774 
1775 static void
1776 clear_marks (void)
1777 {
1778   unsigned order;
1779 
1780   for (order = 2; order < NUM_ORDERS; order++)
1781     {
1782       page_entry *p;
1783 
1784       for (p = G.pages[order]; p != NULL; p = p->next)
1785 	{
1786 	  size_t num_objects = OBJECTS_IN_PAGE (p);
1787 	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1788 
1789 	  /* The data should be page-aligned.  */
1790 	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1791 
1792 	  /* Pages that aren't in the topmost context are not collected;
1793 	     nevertheless, we need their in-use bit vectors to store GC
1794 	     marks.  So, back them up first.  */
1795 	  if (p->context_depth < G.context_depth)
1796 	    {
1797 	      if (! save_in_use_p (p))
1798 		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1799 	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1800 	    }
1801 
1802 	  /* Reset the number of free objects and clear the in-use
1803              bits.  These will be adjusted by mark_obj.  */
1804 	  p->num_free_objects = num_objects;
1805 	  memset (p->in_use_p, 0, bitmap_size);
1806 
1807 	  /* Make sure the one-past-the-end bit is always set.  */
1808 	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1809 	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
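	  /* (With 64-bit longs and, say, 100 objects on the page, this sets
	     bit 36 of word 1.  ggc_recalculate_in_use_p relies on this
	     sentinel when it pretends there is one additional object.)  */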
1810 	}
1811     }
1812 }
1813 
1814 /* Free all empty pages.  Partially empty pages need no attention
1815    because the `mark' bit doubles as an `unused' bit.  */
1816 
1817 static void
1818 sweep_pages (void)
1819 {
1820   unsigned order;
1821 
1822   for (order = 2; order < NUM_ORDERS; order++)
1823     {
1824       /* The last page-entry to consider, regardless of entries
1825 	 placed at the end of the list.  */
1826       page_entry * const last = G.page_tails[order];
1827 
1828       size_t num_objects;
1829       size_t live_objects;
1830       page_entry *p, *previous;
1831       int done;
1832 
1833       p = G.pages[order];
1834       if (p == NULL)
1835 	continue;
1836 
1837       previous = NULL;
1838       do
1839 	{
1840 	  page_entry *next = p->next;
1841 
1842 	  /* Loop until all entries have been examined.  */
1843 	  done = (p == last);
1844 
1845 	  num_objects = OBJECTS_IN_PAGE (p);
1846 
1847 	  /* Add all live objects on this page to the count of
1848              allocated memory.  */
1849 	  live_objects = num_objects - p->num_free_objects;
1850 
1851 	  G.allocated += OBJECT_SIZE (order) * live_objects;
1852 
1853 	  /* Only objects on pages in the topmost context should get
1854 	     collected.  */
1855 	  if (p->context_depth < G.context_depth)
1856 	    ;
1857 
1858 	  /* Remove the page if it's empty.  */
1859 	  else if (live_objects == 0)
1860 	    {
1861 	      /* If P was the first page in the list, then NEXT
1862 		 becomes the new first page in the list, otherwise
1863 		 splice P out of the forward pointers.  */
1864 	      if (! previous)
1865 		G.pages[order] = next;
1866 	      else
1867 		previous->next = next;
1868 
1869 	      /* Splice P out of the back pointers too.  */
1870 	      if (next)
1871 		next->prev = previous;
1872 
1873 	      /* Are we removing the last element?  */
1874 	      if (p == G.page_tails[order])
1875 		G.page_tails[order] = previous;
1876 	      free_page (p);
1877 	      p = previous;
1878 	    }
1879 
1880 	  /* If the page is full, move it to the end.  */
1881 	  else if (p->num_free_objects == 0)
1882 	    {
1883 	      /* Don't move it if it's already at the end.  */
1884 	      if (p != G.page_tails[order])
1885 		{
1886 		  /* Move p to the end of the list.  */
1887 		  p->next = NULL;
1888 		  p->prev = G.page_tails[order];
1889 		  G.page_tails[order]->next = p;
1890 
1891 		  /* Update the tail pointer...  */
1892 		  G.page_tails[order] = p;
1893 
1894 		  /* ... and the head pointer, if necessary.  */
1895 		  if (! previous)
1896 		    G.pages[order] = next;
1897 		  else
1898 		    previous->next = next;
1899 
1900 		  /* And update the backpointer in NEXT if necessary.  */
1901 		  if (next)
1902 		    next->prev = previous;
1903 
1904 		  p = previous;
1905 		}
1906 	    }
1907 
1908 	  /* If we've fallen through to here, it's a page in the
1909 	     topmost context that is neither full nor empty.  Such a
1910 	     page must precede pages at lesser context depth in the
1911 	     list, so move it to the head.  */
1912 	  else if (p != G.pages[order])
1913 	    {
1914 	      previous->next = p->next;
1915 
1916 	      /* Update the backchain in the next node if it exists.  */
1917 	      if (p->next)
1918 		p->next->prev = previous;
1919 
1920 	      /* Move P to the head of the list.  */
1921 	      p->next = G.pages[order];
1922 	      p->prev = NULL;
1923 	      G.pages[order]->prev = p;
1924 
1925 	      /* Update the head pointer.  */
1926 	      G.pages[order] = p;
1927 
1928 	      /* Are we moving the last element?  */
1929 	      if (G.page_tails[order] == p)
1930 	        G.page_tails[order] = previous;
1931 	      p = previous;
1932 	    }
1933 
1934 	  previous = p;
1935 	  p = next;
1936 	}
1937       while (! done);
1938 
1939       /* Now, restore the in_use_p vectors for any pages from contexts
1940          other than the current one.  */
1941       for (p = G.pages[order]; p; p = p->next)
1942 	if (p->context_depth != G.context_depth)
1943 	  ggc_recalculate_in_use_p (p);
1944     }
1945 }
1946 
1947 #ifdef ENABLE_GC_CHECKING
1948 /* Clobber all free objects.  */
1949 
1950 static void
1951 poison_pages (void)
1952 {
1953   unsigned order;
1954 
1955   for (order = 2; order < NUM_ORDERS; order++)
1956     {
1957       size_t size = OBJECT_SIZE (order);
1958       page_entry *p;
1959 
1960       for (p = G.pages[order]; p != NULL; p = p->next)
1961 	{
1962 	  size_t num_objects;
1963 	  size_t i;
1964 
1965 	  if (p->context_depth != G.context_depth)
1966 	    /* Since we don't do any collection for pages in pushed
1967 	       contexts, there's no need to do any poisoning.  And
1968 	       besides, the IN_USE_P array isn't valid until we pop
1969 	       contexts.  */
1970 	    continue;
1971 
1972 	  num_objects = OBJECTS_IN_PAGE (p);
1973 	  for (i = 0; i < num_objects; i++)
1974 	    {
1975 	      size_t word, bit;
1976 	      word = i / HOST_BITS_PER_LONG;
1977 	      bit = i % HOST_BITS_PER_LONG;
1978 	      if (((p->in_use_p[word] >> bit) & 1) == 0)
1979 		{
1980 		  char *object = p->page + i * size;
1981 
1982 		  /* Keep poison-by-write when we expect to use Valgrind,
1983 		     so the exact same memory semantics are kept, in case
1984 		     there are memory errors.  We override this request
1985 		     below.  */
1986 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
1987 								 size));
1988 		  memset (object, 0xa5, size);
1989 
1990 		  /* Drop the handle to avoid handle leak.  */
1991 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
1992 		}
1993 	    }
1994 	}
1995     }
1996 }
1997 #else
1998 #define poison_pages()
1999 #endif
2000 
2001 #ifdef ENABLE_GC_ALWAYS_COLLECT
2002 /* Validate that the reportedly free objects actually are.  */
2003 
2004 static void
2005 validate_free_objects (void)
2006 {
2007   struct free_object *f, *next, *still_free = NULL;
2008 
2009   for (f = G.free_object_list; f ; f = next)
2010     {
2011       page_entry *pe = lookup_page_table_entry (f->object);
2012       size_t bit, word;
2013 
2014       bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2015       word = bit / HOST_BITS_PER_LONG;
2016       bit = bit % HOST_BITS_PER_LONG;
2017       next = f->next;
2018 
2019       /* Make certain it isn't visible from any root.  Notice that we
2020 	 do this check before sweep_pages merges save_in_use_p.  */
2021       gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2022 
2023       /* If the object comes from an outer context, then retain the
2024 	 free_object entry, so that we can verify that the address
2025 	 isn't live on the stack in some outer context.  */
2026       if (pe->context_depth != G.context_depth)
2027 	{
2028 	  f->next = still_free;
2029 	  still_free = f;
2030 	}
2031       else
2032 	free (f);
2033     }
2034 
2035   G.free_object_list = still_free;
2036 }
2037 #else
2038 #define validate_free_objects()
2039 #endif
2040 
2041 /* Top level mark-and-sweep routine.  */
2042 
2043 void
2044 ggc_collect (void)
2045 {
2046   /* Avoid frequent unnecessary work by skipping collection if the
2047      total allocations haven't expanded much since the last
2048      collection.  */
2049   float allocated_last_gc =
2050     MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2051 
2052   float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2053 
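  /* For example, with hypothetical parameter values GGC_MIN_HEAPSIZE == 4096
     (kB) and GGC_MIN_EXPAND == 30 (%), allocated_last_gc is at least 4 MB
     and collection is skipped until G.allocated exceeds that baseline by a
     further 30% of it (unless ggc_force_collect is set).  */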
2054   if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2055     return;
2056 
2057   timevar_push (TV_GC);
2058   if (!quiet_flag)
2059     fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
2060   if (GGC_DEBUG_LEVEL >= 2)
2061     fprintf (G.debug_file, "BEGIN COLLECTING\n");
2062 
2063   /* Zero the total allocated bytes.  This will be recalculated in the
2064      sweep phase.  */
2065   G.allocated = 0;
2066 
2067   /* Release the pages we freed the last time we collected, but didn't
2068      reuse in the interim.  */
2069   release_pages ();
2070 
2071   /* Indicate that we've seen collections at this context depth.  */
2072   G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2073 
2074   invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2075 
2076   clear_marks ();
2077   ggc_mark_roots ();
2078 
2079   if (GATHER_STATISTICS)
2080     ggc_prune_overhead_list ();
2081 
2082   poison_pages ();
2083   validate_free_objects ();
2084   sweep_pages ();
2085 
2086   G.allocated_last_gc = G.allocated;
2087 
2088   invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2089 
2090   timevar_pop (TV_GC);
2091 
2092   if (!quiet_flag)
2093     fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2094   if (GGC_DEBUG_LEVEL >= 2)
2095     fprintf (G.debug_file, "END COLLECTING\n");
2096 }
2097 
2098 /* Print allocation statistics.  */
2099 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2100 		  ? (x) \
2101 		  : ((x) < 1024*1024*10 \
2102 		     ? (x) / 1024 \
2103 		     : (x) / (1024*1024))))
2104 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
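/* For example, 2000000 bytes prints as "1953k" and 50000000 as "47M";
   values below 10240 print unscaled, with a blank scale label.  */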
2105 
2106 void
2107 ggc_print_statistics (void)
2108 {
2109   struct ggc_statistics stats;
2110   unsigned int i;
2111   size_t total_overhead = 0;
2112 
2113   /* Clear the statistics.  */
2114   memset (&stats, 0, sizeof (stats));
2115 
2116   /* Make sure collection will really occur.  */
2117   G.allocated_last_gc = 0;
2118 
2119   /* Collect and print the statistics common across collectors.  */
2120   ggc_print_common_statistics (stderr, &stats);
2121 
2122   /* Release free pages so that we will not count the bytes allocated
2123      there as part of the total allocated memory.  */
2124   release_pages ();
2125 
2126   /* Collect some information about the various sizes of
2127      allocation.  */
2128   fprintf (stderr,
2129            "Memory still allocated at the end of the compilation process\n");
2130   fprintf (stderr, "%-5s %10s  %10s  %10s\n",
2131 	   "Size", "Allocated", "Used", "Overhead");
2132   for (i = 0; i < NUM_ORDERS; ++i)
2133     {
2134       page_entry *p;
2135       size_t allocated;
2136       size_t in_use;
2137       size_t overhead;
2138 
2139       /* Skip empty entries.  */
2140       if (!G.pages[i])
2141 	continue;
2142 
2143       overhead = allocated = in_use = 0;
2144 
2145       /* Figure out the total number of bytes allocated for objects of
2146 	 this size, and how many of them are actually in use.  Also figure
2147 	 out how much memory the page table is using.  */
2148       for (p = G.pages[i]; p; p = p->next)
2149 	{
2150 	  allocated += p->bytes;
2151 	  in_use +=
2152 	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2153 
2154 	  overhead += (sizeof (page_entry) - sizeof (long)
2155 		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2156 	}
2157       fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
2158 	       (unsigned long) OBJECT_SIZE (i),
2159 	       SCALE (allocated), STAT_LABEL (allocated),
2160 	       SCALE (in_use), STAT_LABEL (in_use),
2161 	       SCALE (overhead), STAT_LABEL (overhead));
2162       total_overhead += overhead;
2163     }
2164   fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
2165 	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2166 	   SCALE (G.allocated), STAT_LABEL (G.allocated),
2167 	   SCALE (total_overhead), STAT_LABEL (total_overhead));
2168 
2169   if (GATHER_STATISTICS)
2170     {
2171       fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2172 
2173       fprintf (stderr, "Total Overhead:                        %10" HOST_LONG_LONG_FORMAT "d\n",
2174 	       G.stats.total_overhead);
2175       fprintf (stderr, "Total Allocated:                       %10" HOST_LONG_LONG_FORMAT "d\n",
2176 	       G.stats.total_allocated);
2177 
2178       fprintf (stderr, "Total Overhead  under  32B:            %10" HOST_LONG_LONG_FORMAT "d\n",
2179 	       G.stats.total_overhead_under32);
2180       fprintf (stderr, "Total Allocated under  32B:            %10" HOST_LONG_LONG_FORMAT "d\n",
2181 	       G.stats.total_allocated_under32);
2182       fprintf (stderr, "Total Overhead  under  64B:            %10" HOST_LONG_LONG_FORMAT "d\n",
2183 	       G.stats.total_overhead_under64);
2184       fprintf (stderr, "Total Allocated under  64B:            %10" HOST_LONG_LONG_FORMAT "d\n",
2185 	       G.stats.total_allocated_under64);
2186       fprintf (stderr, "Total Overhead  under 128B:            %10" HOST_LONG_LONG_FORMAT "d\n",
2187 	       G.stats.total_overhead_under128);
2188       fprintf (stderr, "Total Allocated under 128B:            %10" HOST_LONG_LONG_FORMAT "d\n",
2189 	       G.stats.total_allocated_under128);
2190 
2191       for (i = 0; i < NUM_ORDERS; i++)
2192 	if (G.stats.total_allocated_per_order[i])
2193 	  {
2194 	    fprintf (stderr, "Total Overhead  page size %7lu:     %10" HOST_LONG_LONG_FORMAT "d\n",
2195 		     (unsigned long) OBJECT_SIZE (i),
2196 		     G.stats.total_overhead_per_order[i]);
2197 	    fprintf (stderr, "Total Allocated page size %7lu:     %10" HOST_LONG_LONG_FORMAT "d\n",
2198 		     (unsigned long) OBJECT_SIZE (i),
2199 		     G.stats.total_allocated_per_order[i]);
2200 	  }
2201     }
2202 }
2203 
2204 struct ggc_pch_ondisk
2205 {
2206   unsigned totals[NUM_ORDERS];
2207 };
2208 
2209 struct ggc_pch_data
2210 {
2211   struct ggc_pch_ondisk d;
2212   uintptr_t base[NUM_ORDERS];
2213   size_t written[NUM_ORDERS];
2214 };
2215 
2216 struct ggc_pch_data *
2217 init_ggc_pch (void)
2218 {
2219   return XCNEW (struct ggc_pch_data);
2220 }
2221 
2222 void
2223 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2224 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2225 {
2226   unsigned order;
2227 
2228   if (size < NUM_SIZE_LOOKUP)
2229     order = size_lookup[size];
2230   else
2231     {
2232       order = 10;
2233       while (size > OBJECT_SIZE (order))
2234 	order++;
2235     }
2236 
2237   d->d.totals[order]++;
2238 }
2239 
2240 size_t
2241 ggc_pch_total_size (struct ggc_pch_data *d)
2242 {
2243   size_t a = 0;
2244   unsigned i;
2245 
2246   for (i = 0; i < NUM_ORDERS; i++)
2247     a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2248   return a;
2249 }
2250 
2251 void
2252 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2253 {
2254   uintptr_t a = (uintptr_t) base;
2255   unsigned i;
2256 
2257   for (i = 0; i < NUM_ORDERS; i++)
2258     {
2259       d->base[i] = a;
2260       a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2261     }
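
  /* The PCH image is thus laid out as one page-aligned region per order, in
     increasing order index, matching the sizes summed by ggc_pch_total_size;
     ggc_pch_alloc_object hands out addresses sequentially from these
     regions.  */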
2262 }
2263 
2264 
2265 char *
2266 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2267 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2268 {
2269   unsigned order;
2270   char *result;
2271 
2272   if (size < NUM_SIZE_LOOKUP)
2273     order = size_lookup[size];
2274   else
2275     {
2276       order = 10;
2277       while (size > OBJECT_SIZE (order))
2278 	order++;
2279     }
2280 
2281   result = (char *) d->base[order];
2282   d->base[order] += OBJECT_SIZE (order);
2283   return result;
2284 }
2285 
2286 void
2287 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2288 		       FILE *f ATTRIBUTE_UNUSED)
2289 {
2290   /* Nothing to do.  */
2291 }
2292 
2293 void
2294 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2295 		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2296 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2297 {
2298   unsigned order;
2299   static const char emptyBytes[256] = { 0 };
2300 
2301   if (size < NUM_SIZE_LOOKUP)
2302     order = size_lookup[size];
2303   else
2304     {
2305       order = 10;
2306       while (size > OBJECT_SIZE (order))
2307 	order++;
2308     }
2309 
2310   if (fwrite (x, size, 1, f) != 1)
2311     fatal_error ("can%'t write PCH file: %m");
2312 
2313   /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2314      object out to OBJECT_SIZE(order).  This happens for strings.  */
2315 
2316   if (size != OBJECT_SIZE (order))
2317     {
2318       unsigned padding = OBJECT_SIZE(order) - size;
2319 
2320       /* To speed small writes, we use a nulled-out array that's larger
2321          than most padding requests as the source for our null bytes.  This
2322          permits us to do the padding with fwrite() rather than fseek(), and
2323          limits the chance the OS may try to flush any outstanding writes.  */
2324       if (padding <= sizeof(emptyBytes))
2325         {
2326           if (fwrite (emptyBytes, 1, padding, f) != padding)
2327             fatal_error ("can%'t write PCH file");
2328         }
2329       else
2330         {
2331           /* Larger than our buffer?  Just default to fseek.  */
2332           if (fseek (f, padding, SEEK_CUR) != 0)
2333             fatal_error ("can%'t write PCH file");
2334         }
2335     }
2336 
2337   d->written[order]++;
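  /* Once the last object of this order has been written, skip forward over
     the page-alignment padding implied by PAGE_ALIGN in ggc_pch_total_size,
     so the next order's objects start on a page boundary in the file.  */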
2338   if (d->written[order] == d->d.totals[order]
2339       && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2340 				   G.pagesize),
2341 		SEEK_CUR) != 0)
2342     fatal_error ("can%'t write PCH file: %m");
2343 }
2344 
2345 void
2346 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2347 {
2348   if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2349     fatal_error ("can%'t write PCH file: %m");
2350   free (d);
2351 }
2352 
2353 /* Move the PCH PTE entries just added to the end of by_depth, to the
2354    front.  */
2355 
2356 static void
2357 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2358 {
2359   unsigned i;
2360 
2361   /* First, we swap the new entries to the front of the varrays.  */
2362   page_entry **new_by_depth;
2363   unsigned long **new_save_in_use;
2364 
2365   new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2366   new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2367 
2368   memcpy (&new_by_depth[0],
2369 	  &G.by_depth[count_old_page_tables],
2370 	  count_new_page_tables * sizeof (void *));
2371   memcpy (&new_by_depth[count_new_page_tables],
2372 	  &G.by_depth[0],
2373 	  count_old_page_tables * sizeof (void *));
2374   memcpy (&new_save_in_use[0],
2375 	  &G.save_in_use[count_old_page_tables],
2376 	  count_new_page_tables * sizeof (void *));
2377   memcpy (&new_save_in_use[count_new_page_tables],
2378 	  &G.save_in_use[0],
2379 	  count_old_page_tables * sizeof (void *));
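
  /* At this point the PCH entries, which occupied the last
     count_new_page_tables slots of the old arrays, sit at indices
     0 .. count_new_page_tables - 1 of the new arrays, followed by the
     count_old_page_tables pre-existing entries.  */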
2380 
2381   free (G.by_depth);
2382   free (G.save_in_use);
2383 
2384   G.by_depth = new_by_depth;
2385   G.save_in_use = new_save_in_use;
2386 
2387   /* Now update all the index_by_depth fields.  */
2388   for (i = G.by_depth_in_use; i > 0; --i)
2389     {
2390       page_entry *p = G.by_depth[i-1];
2391       p->index_by_depth = i-1;
2392     }
2393 
2394   /* And last, we update the depth pointers in G.depth.  The first
2395      entry is already 0, and context 0 entries always start at index
2396      0, so there is nothing to update in the first slot.  We need a
2397      second slot only if we have old ptes, and if we do, they start
2398      at index count_new_page_tables.  */
2399   if (count_old_page_tables)
2400     push_depth (count_new_page_tables);
2401 }
2402 
2403 void
2404 ggc_pch_read (FILE *f, void *addr)
2405 {
2406   struct ggc_pch_ondisk d;
2407   unsigned i;
2408   char *offs = (char *) addr;
2409   unsigned long count_old_page_tables;
2410   unsigned long count_new_page_tables;
2411 
2412   count_old_page_tables = G.by_depth_in_use;
2413 
2414   /* We've just read in a PCH file.  So, every object that used to be
2415      allocated is now free.  */
2416   clear_marks ();
2417 #ifdef ENABLE_GC_CHECKING
2418   poison_pages ();
2419 #endif
2420   /* Since we free all the allocated objects, the free list becomes
2421      useless.  Validate it now, which will also clear it.  */
2422   validate_free_objects ();
2423 
2424   /* No object read from a PCH file should ever be freed.  So, set the
2425      context depth to 1, and set the depth of all the currently-allocated
2426      pages to be 1 too.  PCH pages will have depth 0.  */
2427   gcc_assert (!G.context_depth);
2428   G.context_depth = 1;
2429   for (i = 0; i < NUM_ORDERS; i++)
2430     {
2431       page_entry *p;
2432       for (p = G.pages[i]; p != NULL; p = p->next)
2433 	p->context_depth = G.context_depth;
2434     }
2435 
2436   /* Allocate the appropriate page-table entries for the pages read from
2437      the PCH file.  */
2438   if (fread (&d, sizeof (d), 1, f) != 1)
2439     fatal_error ("can%'t read PCH file: %m");
2440 
2441   for (i = 0; i < NUM_ORDERS; i++)
2442     {
2443       struct page_entry *entry;
2444       char *pte;
2445       size_t bytes;
2446       size_t num_objs;
2447       size_t j;
2448 
2449       if (d.totals[i] == 0)
2450 	continue;
2451 
2452       bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2453       num_objs = bytes / OBJECT_SIZE (i);
2454       entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2455 					    - sizeof (long)
2456 					    + BITMAP_SIZE (num_objs + 1)));
2457       entry->bytes = bytes;
2458       entry->page = offs;
2459       entry->context_depth = 0;
2460       offs += bytes;
2461       entry->num_free_objects = 0;
2462       entry->order = i;
2463 
2464       for (j = 0;
2465 	   j + HOST_BITS_PER_LONG <= num_objs + 1;
2466 	   j += HOST_BITS_PER_LONG)
2467 	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2468       for (; j < num_objs + 1; j++)
2469 	entry->in_use_p[j / HOST_BITS_PER_LONG]
2470 	  |= 1L << (j % HOST_BITS_PER_LONG);
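
      /* Every object read from the PCH is marked in use, and the
	 one-past-the-end sentinel bit is set as well (hence the
	 "num_objs + 1" bounds above); together with num_free_objects == 0
	 this reflects the rule that PCH objects are never freed.  */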
2471 
2472       for (pte = entry->page;
2473 	   pte < entry->page + entry->bytes;
2474 	   pte += G.pagesize)
2475 	set_page_table_entry (pte, entry);
2476 
2477       if (G.page_tails[i] != NULL)
2478 	G.page_tails[i]->next = entry;
2479       else
2480 	G.pages[i] = entry;
2481       G.page_tails[i] = entry;
2482 
2483       /* We start off by just adding all the new information to the
2484 	 end of the varrays; later, we will move the new information
2485 	 to the front of the varrays, as the PCH page tables are at
2486 	 context 0.  */
2487       push_by_depth (entry, 0);
2488     }
2489 
2490   /* Now, we update the various data structures that speed page table
2491      handling.  */
2492   count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2493 
2494   move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2495 
2496   /* Update the statistics.  */
2497   G.allocated = G.allocated_last_gc = offs - (char *)addr;
2498 }
2499