1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2    Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
3    Free Software Foundation, Inc.
4    Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
5    (dberlin@dberlin.org)
6 
7 
8 This file is part of GCC.
9 
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
14 
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
18 for more details.
19 
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING.  If not, write to the Free
22 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
23 02111-1307, USA.  */
24 
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "toplev.h"
33 #include "varray.h"
34 #include "flags.h"
35 #include "ggc.h"
36 #include "timevar.h"
37 #include "params.h"
38 #include "bitmap.h"
39 
40 #ifdef ENABLE_VALGRIND_CHECKING
41 # ifdef HAVE_VALGRIND_MEMCHECK_H
42 #  include <valgrind/memcheck.h>
43 # elif defined HAVE_MEMCHECK_H
44 #  include <memcheck.h>
45 # else
46 #  include <valgrind.h>
47 # endif
48 #else
49 /* Avoid #ifdef:s when we can help it.  */
50 #define VALGRIND_DISCARD(x)
51 #define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
52 #define VALGRIND_FREELIKE_BLOCK(x,y)
53 #endif
54 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
55    file open.  Prefer either to valloc.  */
56 #ifdef HAVE_MMAP_ANON
57 # undef HAVE_MMAP_DEV_ZERO
58 
59 # include <sys/mman.h>
60 # ifndef MAP_FAILED
61 #  define MAP_FAILED -1
62 # endif
63 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
64 #  define MAP_ANONYMOUS MAP_ANON
65 # endif
66 # define USING_MMAP
67 
68 #endif
69 
70 #ifdef HAVE_MMAP_DEV_ZERO
71 
72 # include <sys/mman.h>
73 # ifndef MAP_FAILED
74 #  define MAP_FAILED -1
75 # endif
76 # define USING_MMAP
77 
78 #endif
79 
80 #ifndef USING_MMAP
81 #error "Zone collector requires mmap"
82 #endif
83 
84 #if (GCC_VERSION < 3001)
85 #define prefetch(X) ((void) X)
86 #else
87 #define prefetch(X) __builtin_prefetch (X)
88 #endif
89 
90 /* NOTES:
91    If we track inter-zone pointers, we can mark single zones at a
92    time.
93    If we have a zone where we guarantee no inter-zone pointers, we
94    could mark that zone separately.
95    The garbage zone should not be marked, and we should return 1 in
96    ggc_set_mark for any object in the garbage zone, which cuts off
97    marking quickly.  */
98 /* Strategy:
99 
100    This garbage-collecting allocator segregates objects into zones.
101    It also segregates objects into "large" and "small" bins.  Large
102    objects are those greater than or equal to the page size.
103 
104    Pages for small objects are broken up into chunks, each of which
105    are described by a struct alloc_chunk.  One can walk over all
106    chunks on the page by adding the chunk size to the chunk's data
107    address.  The free space for a page exists in the free chunk bins.
108 
109    Each page-entry also has a context depth, which is used to track
110    pushing and popping of allocation contexts.  Only objects allocated
111    in the current (highest-numbered) context may be collected.
112 
113    Empty pages (of all sizes) are kept on a single page cache list,
114    and are considered first when new pages are required; they are
115    deallocated at the start of the next collection if they haven't
116    been recycled by then.  */
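
/* For example, the sweep and cookie-checking code below walks every
   chunk on a small-object page roughly like this:

     struct alloc_chunk *chunk = (struct alloc_chunk *) p->page;
     struct alloc_chunk *end = (struct alloc_chunk *) (p->page + G.pagesize);
     do
       chunk = (struct alloc_chunk *) (chunk->u.data + chunk->size);
     while (chunk < end);  */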
117 
118 /* Define GGC_DEBUG_LEVEL to print debugging information.
119      0: No debugging output.
120      1: GC statistics only.
121      2: Page-entry allocations/deallocations as well.
122      3: Object allocations as well.
123      4: Object marks as well.  */
124 #define GGC_DEBUG_LEVEL (0)
125 
126 #ifndef HOST_BITS_PER_PTR
127 #define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
128 #endif
129 #ifdef COOKIE_CHECKING
130 #define CHUNK_MAGIC 0x95321123
131 #define DEADCHUNK_MAGIC 0x12817317
132 #endif
133 
134 /* This structure manages small chunks.  When the chunk is free, it's
135    linked with other chunks via free_next.  When the chunk is allocated,
136    the data starts at u.  Large chunks are allocated one at a time to
137    their own page, and so don't come in here.
138 
139    The "type" field is a placeholder for a future change to do
140    generational collection.  At present it is 0 when free and
141    1 when allocated.  */
142 
143 struct alloc_chunk {
144 #ifdef COOKIE_CHECKING
145   unsigned int magic;
146 #endif
147   unsigned int type:1;
148   unsigned int typecode:14;
149   unsigned int large:1;
150   unsigned int size:15;
151   unsigned int mark:1;
152   union {
153     struct alloc_chunk *next_free;
154     char data[1];
155 
156     /* Make sure the data is sufficiently aligned.  */
157     HOST_WIDEST_INT align_i;
158 #ifdef HAVE_LONG_DOUBLE
159     long double align_d;
160 #else
161     double align_d;
162 #endif
163   } u;
164 } __attribute__ ((packed));
165 
166 #define CHUNK_OVERHEAD	(offsetof (struct alloc_chunk, u))
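
/* An allocated object's address is chunk->u.data, which lies
   CHUNK_OVERHEAD bytes past the start of its chunk; routines such as
   ggc_allocated_p and ggc_set_mark below recover the header from a
   user pointer P with

     chunk = (struct alloc_chunk *) ((char *) p - CHUNK_OVERHEAD);  */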
167 
168 /* We maintain several bins of free lists for chunks for very small
169    objects.  We never exhaustively search other bins -- if we don't
170    find one of the proper size, we allocate from the "larger" bin.  */
171 
172 /* Decreasing the number of free bins increases the time it takes to allocate.
173    The same is true of increasing MAX_FREE_BIN_SIZE without increasing NUM_FREE_BINS.
174 
175    After much histogramming of allocation sizes and time spent on gc,
176    on a PowerPC G4 7450 (667 MHz) and a Pentium 4 (2.8 GHz),
177    these were determined to be the optimal values.  */
178 #define NUM_FREE_BINS		64
179 #define MAX_FREE_BIN_SIZE	256
180 #define FREE_BIN_DELTA		(MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
181 #define SIZE_BIN_UP(SIZE)	(((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
182 #define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
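
/* With the values above, FREE_BIN_DELTA is 256 / 64 == 4, so, for
   example, SIZE_BIN_UP (10) == (10 + 3) / 4 == 3 and
   SIZE_BIN_DOWN (10) == 10 / 4 == 2.  */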
183 
184 /* Marker used as chunk->size for a large object.  Should correspond
185    to the size of the bitfield above.  */
186 #define LARGE_OBJECT_SIZE	0x7fff
187 
188 /* We use this structure to determine the alignment required for
189    allocations.  For power-of-two sized allocations, that's not a
190    problem, but it does matter for odd-sized allocations.  */
191 
192 struct max_alignment {
193   char c;
194   union {
195     HOST_WIDEST_INT i;
196 #ifdef HAVE_LONG_DOUBLE
197     long double d;
198 #else
199     double d;
200 #endif
201   } u;
202 };
203 
204 /* The biggest alignment required.  */
205 
206 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
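
/* For instance, on a host where the most strictly aligned union member
   requires 8-byte alignment (typical, but not guaranteed), the char is
   followed by 7 bytes of padding and MAX_ALIGNMENT is 8.  */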
207 
208 /* Compute the smallest nonnegative number which when added to X gives
209    a multiple of F.  */
210 
211 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
212 
213 /* Compute the smallest multiple of F that is >= X.  */
214 
215 #define ROUND_UP(x, f) (CEIL (x, f) * (f))
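
/* For example, ROUND_UP_VALUE (10, 8) == 6, since 10 + 6 == 16 is the
   next multiple of 8, and ROUND_UP (10, 8) == 16.  */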
216 
217 
218 /* A page_entry records the status of an allocation page.  */
219 typedef struct page_entry
220 {
221   /* The next page-entry with objects of the same size, or NULL if
222      this is the last page-entry.  */
223   struct page_entry *next;
224 
225   /* The number of bytes allocated.  (This will always be a multiple
226      of the host system page size.)  */
227   size_t bytes;
228 
229   /* How many collections we've survived.  */
230   size_t survived;
231 
232   /* The address at which the memory is allocated.  */
233   char *page;
234 
235   /* Context depth of this page.  */
236   unsigned short context_depth;
237 
238   /* Does this page contain small objects, or one large object?  */
239   bool large_p;
240 
241   /* The zone that this page entry belongs to.  */
242   struct alloc_zone *zone;
243 } page_entry;
244 
245 
246 /* The global variables.  */
247 static struct globals
248 {
249   /* The linked list of zones.  */
250   struct alloc_zone *zones;
251 
252   /* The system's page size.  */
253   size_t pagesize;
254   size_t lg_pagesize;
255 
256   /* A file descriptor open to /dev/zero for reading.  */
257 #if defined (HAVE_MMAP_DEV_ZERO)
258   int dev_zero_fd;
259 #endif
260 
261   /* The stream used for debugging output.  */
262   FILE *debug_file;
263 } G;
264 
265 /*  The zone allocation structure.  */
266 struct alloc_zone
267 {
268   /* Name of the zone.  */
269   const char *name;
270 
271   /* Linked list of pages in a zone.  */
272   page_entry *pages;
273 
274   /* Linked lists of free storage.  Slot N, for 1 <= N <= NUM_FREE_BINS, holds
275      chunks of at least N * FREE_BIN_DELTA bytes; all other chunks are in slot 0.  */
276   struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
277 
278   /* Bytes currently allocated.  */
279   size_t allocated;
280 
281   /* Bytes allocated at the end of the last collection.  */
282   size_t allocated_last_gc;
283 
284   /* Total amount of memory mapped.  */
285   size_t bytes_mapped;
286 
287   /* Bit N set if any allocations have been done at context depth N.  */
288   unsigned long context_depth_allocations;
289 
290   /* Bit N set if any collections have been done at context depth N.  */
291   unsigned long context_depth_collections;
292 
293   /* The current depth in the context stack.  */
294   unsigned short context_depth;
295 
296   /* A cache of free system pages.  */
297   page_entry *free_pages;
298 
299   /* Next zone in the linked list of zones.  */
300   struct alloc_zone *next_zone;
301 
302   /* True if this zone was collected during this collection.  */
303   bool was_collected;
304 
305   /* True if this zone should be destroyed after the next collection.  */
306   bool dead;
307 } main_zone;
308 
309 struct alloc_zone *rtl_zone;
310 struct alloc_zone *garbage_zone;
311 struct alloc_zone *tree_zone;
312 
313 /* Allocate pages in chunks of this size, to throttle calls to memory
314    allocation routines.  The first page is used, the rest go onto the
315    free list.  */
317 #define GGC_QUIRE_SIZE 16
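
/* For example, with a typical (but not guaranteed) 4K system page size,
   each mmap call in alloc_small_page maps 16 * 4K == 64K at once; the
   15 pages not used immediately go onto the zone's free page list.  */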
318 
319 static int ggc_allocated_p (const void *);
320 #ifdef USING_MMAP
321 static char *alloc_anon (char *, size_t, struct alloc_zone *);
322 #endif
323 static struct page_entry * alloc_small_page ( struct alloc_zone *);
324 static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
325 static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
326 static void free_page (struct page_entry *);
327 static void release_pages (struct alloc_zone *);
328 static void sweep_pages (struct alloc_zone *);
329 static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
330 static bool ggc_collect_1 (struct alloc_zone *, bool);
331 static void check_cookies (void);
332 
333 
334 /* Returns nonzero if P was allocated in GC'able memory.  */
335 
336 static inline int
337 ggc_allocated_p (const void *p)
338 {
339   struct alloc_chunk *chunk;
340   chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
341 #ifdef COOKIE_CHECKING
342   if (chunk->magic != CHUNK_MAGIC)
343     abort ();
344 #endif
345   if (chunk->type == 1)
346     return true;
347   return false;
348 }
349 
350 
351 #ifdef USING_MMAP
352 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
353    (if non-null).  The ifdef structure here is intended to cause a
354    compile error unless exactly one of the HAVE_* macros is defined.  */
355 
356 static inline char *
357 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
358 {
359 #ifdef HAVE_MMAP_ANON
360   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
361 			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
362 #endif
363 #ifdef HAVE_MMAP_DEV_ZERO
364   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
365 			      MAP_PRIVATE, G.dev_zero_fd, 0);
366 #endif
367   VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0);
368 
369   if (page == (char *) MAP_FAILED)
370     {
371       perror ("virtual memory exhausted");
372       exit (FATAL_EXIT_CODE);
373     }
374 
375   /* Remember that we allocated this memory.  */
376   zone->bytes_mapped += size;
377   /* Pretend we don't have access to the allocated pages.  We'll enable
378      access to smaller pieces of the area in ggc_alloc.  Discard the
379      handle to avoid handle leak.  */
380   VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
381   return page;
382 }
383 #endif
384 
385 /* Allocate a new page for small objects in ZONE, and return an
386    entry for it.  */
387 
388 static inline struct page_entry *
389 alloc_small_page (struct alloc_zone *zone)
390 {
391   struct page_entry *entry;
392   char *page;
393 
394   page = NULL;
395 
396   /* Check the list of free pages for one we can use.  */
397   entry = zone->free_pages;
398   if (entry != NULL)
399     {
400       /* Recycle the allocated memory from this page ...  */
401       zone->free_pages = entry->next;
402       page = entry->page;
405     }
406 #ifdef USING_MMAP
407   else
408     {
409       /* We want just one page.  Allocate a bunch of them and put the
410 	 extras on the freelist.  (Can only do this optimization with
411 	 mmap for backing store.)  */
412       struct page_entry *e, *f = zone->free_pages;
413       int i;
414 
415       page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);
416 
417       /* This loop counts down so that the chain will be in ascending
418 	 memory order.  */
419       for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
420 	{
421 	  e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
422 	  e->bytes = G.pagesize;
423 	  e->page = page + (i << G.lg_pagesize);
424 	  e->next = f;
425 	  f = e;
426 	}
427 
428       zone->free_pages = f;
429     }
430 #endif
431   if (entry == NULL)
432     entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
433 
434   entry->next = 0;
435   entry->bytes = G.pagesize;
436   entry->page = page;
437   entry->context_depth = zone->context_depth;
438   entry->large_p = false;
439   entry->zone = zone;
440   zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
441 
442   if (GGC_DEBUG_LEVEL >= 2)
443     fprintf (G.debug_file,
444 	     "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
445 	     (PTR) entry, page, page + G.pagesize - 1);
446 
447   return entry;
448 }
452 
453 /* Allocate a large page of size SIZE in ZONE.  */
454 
455 static inline struct page_entry *
456 alloc_large_page (size_t size, struct alloc_zone *zone)
457 {
458   struct page_entry *entry;
459   char *page;
460   size =  ROUND_UP (size, 1024);
461   page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
462   entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);
463 
464   entry->next = 0;
465   entry->bytes = size;
466   entry->page = page;
467   entry->context_depth = zone->context_depth;
468   entry->large_p = true;
469   entry->zone = zone;
470   zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
471 
472   if (GGC_DEBUG_LEVEL >= 2)
473     fprintf (G.debug_file,
474 	     "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
475 	     (PTR) entry, page, page + size - 1);
476 
477   return entry;
478 }
479 
480 
481 /* For a page that is no longer needed, put it on the free page list.  */
482 
483 static inline void
484 free_page (page_entry *entry)
485 {
486   if (GGC_DEBUG_LEVEL >= 2)
487     fprintf (G.debug_file,
488 	     "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
489 	     entry->page, entry->page + entry->bytes - 1);
490 
491   if (entry->large_p)
492     {
493       free (entry->page);
494       VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
495     }
496   else
497     {
498       /* Mark the page as inaccessible.  Discard the handle to
499 	 avoid handle leak.  */
500       VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
501 
502       entry->next = entry->zone->free_pages;
503       entry->zone->free_pages = entry;
504     }
505 }
506 
507 /* Release the free page cache to the system.  */
508 
509 static void
510 release_pages (struct alloc_zone *zone)
511 {
512 #ifdef USING_MMAP
513   page_entry *p, *next;
514   char *start;
515   size_t len;
516 
517   /* Gather up adjacent pages so they are unmapped together.  */
518   p = zone->free_pages;
519 
520   while (p)
521     {
522       start = p->page;
523       next = p->next;
524       len = p->bytes;
525       free (p);
526       p = next;
527 
528       while (p && p->page == start + len)
529 	{
530 	  next = p->next;
531 	  len += p->bytes;
532 	  free (p);
533 	  p = next;
534 	}
535 
536       munmap (start, len);
537       zone->bytes_mapped -= len;
538     }
539 
540   zone->free_pages = NULL;
541 #endif
542 }
543 
544 /* Place CHUNK of size SIZE on the free list for ZONE.  */
545 
546 static inline void
547 free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
548 {
549   size_t bin = 0;
550 
551   bin = SIZE_BIN_DOWN (size);
552   if (bin == 0)
553     abort ();
554   if (bin > NUM_FREE_BINS)
555     bin = 0;
556 #ifdef COOKIE_CHECKING
557   if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
558     abort ();
559   chunk->magic = DEADCHUNK_MAGIC;
560 #endif
561   chunk->u.next_free = zone->free_chunks[bin];
562   zone->free_chunks[bin] = chunk;
563   if (GGC_DEBUG_LEVEL >= 3)
564     fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
565   VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
566 }
567 
568 /* Allocate a chunk of memory of SIZE bytes.  */
569 
570 static void *
571 ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
572 {
573   size_t bin = 0;
574   size_t lsize = 0;
575   struct page_entry *entry;
576   struct alloc_chunk *chunk, *lchunk, **pp;
577   void *result;
578 
579   /* Align size, so that we're assured of aligned allocations.  */
580   if (size < FREE_BIN_DELTA)
581     size = FREE_BIN_DELTA;
582   size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
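
  /* For instance, assuming MAX_ALIGNMENT is 8 on this host, a request
     for 10 bytes is rounded up to (10 + 7) & -8 == 16 bytes.  */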
583 
584   /* Large objects are handled specially.  */
585   if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
586     {
587       size = ROUND_UP (size, 1024);
588       entry = alloc_large_page (size, zone);
589       entry->survived = 0;
590       entry->next = entry->zone->pages;
591       entry->zone->pages = entry;
592 
593       chunk = (struct alloc_chunk *) entry->page;
594       VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
595       chunk->large = 1;
596       chunk->size = CEIL (size, 1024);
597 
598       goto found;
599     }
600 
601   /* First look for a tiny object already segregated into its own
602      size bucket.  */
603   bin = SIZE_BIN_UP (size);
604   if (bin <= NUM_FREE_BINS)
605     {
606       chunk = zone->free_chunks[bin];
607       if (chunk)
608 	{
609 	  zone->free_chunks[bin] = chunk->u.next_free;
610 	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
611 	  goto found;
612 	}
613     }
614 
615   /* Failing that, look through the "other" bucket for a chunk
616      that is large enough.  */
617   pp = &(zone->free_chunks[0]);
618   chunk = *pp;
619   while (chunk && chunk->size < size)
620     {
621       pp = &chunk->u.next_free;
622       chunk = *pp;
623     }
624 
625   /* Failing that, allocate new storage.  */
626   if (!chunk)
627     {
628       entry = alloc_small_page (zone);
629       entry->next = entry->zone->pages;
630       entry->zone->pages = entry;
631 
632       chunk = (struct alloc_chunk *) entry->page;
633       VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
634       chunk->size = G.pagesize - CHUNK_OVERHEAD;
635       chunk->large = 0;
636     }
637   else
638     {
639       *pp = chunk->u.next_free;
640       VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
641       chunk->large = 0;
642     }
643   /* Release extra memory from a chunk that's too big.  */
644   lsize = chunk->size - size;
645   if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
646     {
647       VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
648       chunk->size = size;
649 
650       lsize -= CHUNK_OVERHEAD;
651       lchunk = (struct alloc_chunk *)(chunk->u.data + size);
652       VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
653 #ifdef COOKIE_CHECKING
654       lchunk->magic = CHUNK_MAGIC;
655 #endif
656       lchunk->type = 0;
657       lchunk->mark = 0;
658       lchunk->size = lsize;
659       lchunk->large = 0;
660       free_chunk (lchunk, lsize, zone);
661     }
662   /* Calculate the object's address.  */
663  found:
664 #ifdef COOKIE_CHECKING
665   chunk->magic = CHUNK_MAGIC;
666 #endif
667   chunk->type = 1;
668   chunk->mark = 0;
669   chunk->typecode = type;
670   result = chunk->u.data;
671 
672 #ifdef ENABLE_GC_CHECKING
673   /* Poison the object by writing 0xaf over it, so that we keep the
674      exact same semantics in the presence of memory bugs, regardless of
675      ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
676      handle to avoid handle leak.  */
677   VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
678 
679   /* `Poison' the entire allocated object.  */
680   memset (result, 0xaf, size);
681 #endif
682 
683   /* Tell Valgrind that the memory is there, but its content isn't
684      defined.  The bytes at the end of the object are still marked
685      inaccessible.  */
686   VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
687 
688   /* Keep track of how many bytes are being allocated.  This
689      information is used in deciding when to collect.  */
690   zone->allocated += size + CHUNK_OVERHEAD;
691 
692   if (GGC_DEBUG_LEVEL >= 3)
693     fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
694 	     (void *)chunk, (unsigned long) size, result);
695 
696   return result;
697 }
698 
699 /* Allocate a chunk of SIZE bytes with GC type GTE, placing it in a
700    zone appropriate for that type.  */
701 
702 void *
703 ggc_alloc_typed (enum gt_types_enum gte, size_t size)
704 {
705   switch (gte)
706     {
707     case gt_ggc_e_14lang_tree_node:
708       return ggc_alloc_zone_1 (size, tree_zone, gte);
709 
710     case gt_ggc_e_7rtx_def:
711       return ggc_alloc_zone_1 (size, rtl_zone, gte);
712 
713     case gt_ggc_e_9rtvec_def:
714       return ggc_alloc_zone_1 (size, rtl_zone, gte);
715 
716     default:
717       return ggc_alloc_zone_1 (size, &main_zone, gte);
718     }
719 }
720 
721 /* Normal ggc_alloc simply allocates into the main zone.  */
722 
723 void *
724 ggc_alloc (size_t size)
725 {
726   return ggc_alloc_zone_1 (size, &main_zone, -1);
727 }
728 
729 /* Zone allocation allocates into the specified zone.  */
730 
731 void *
732 ggc_alloc_zone (size_t size, struct alloc_zone *zone)
733 {
734   return ggc_alloc_zone_1 (size, zone, -1);
735 }
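
/* Illustrative usage, with hypothetical callers not taken from this
   file:

     tree t = ggc_alloc_typed (gt_ggc_e_14lang_tree_node, sizeof (*t));
     void *p = ggc_alloc_zone (nbytes, rtl_zone);
     void *q = ggc_alloc (nbytes);

   The first allocation lands in tree_zone, the second in the zone the
   caller names, and the third in the main zone.  */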
736 
737 /* If P is not marked, mark it and return false.  Otherwise return true.
738    P must have been allocated by the GC allocator; it mustn't point to
739    static objects, stack variables, or memory allocated with malloc.  */
740 
741 int
742 ggc_set_mark (const void *p)
743 {
744   struct alloc_chunk *chunk;
745 
746   chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
747 #ifdef COOKIE_CHECKING
748   if (chunk->magic != CHUNK_MAGIC)
749     abort ();
750 #endif
751   if (chunk->mark)
752     return 1;
753   chunk->mark = 1;
754 
755   if (GGC_DEBUG_LEVEL >= 4)
756     fprintf (G.debug_file, "Marking %p\n", p);
757 
758   return 0;
759 }
760 
761 /* Return 1 if P has been marked, zero otherwise.
762    P must have been allocated by the GC allocator; it mustn't point to
763    static objects, stack variables, or memory allocated with malloc.  */
764 
765 int
766 ggc_marked_p (const void *p)
767 {
768   struct alloc_chunk *chunk;
769 
770   chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
771 #ifdef COOKIE_CHECKING
772   if (chunk->magic != CHUNK_MAGIC)
773     abort ();
774 #endif
775   return chunk->mark;
776 }
777 
778 /* Return the size of the gc-able object P.  */
779 
780 size_t
781 ggc_get_size (const void *p)
782 {
783   struct alloc_chunk *chunk;
784 
785   chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
786 #ifdef COOKIE_CHECKING
787   if (chunk->magic != CHUNK_MAGIC)
788     abort ();
789 #endif
790   if (chunk->large)
791     return chunk->size * 1024;
792 
793   return chunk->size;
794 }
795 
796 /* Initialize the ggc-zone-mmap allocator.  */
797 void
798 init_ggc (void)
799 {
800   /* Set up the main zone by hand.  */
801   main_zone.name = "Main zone";
802   G.zones = &main_zone;
803 
804   /* Allocate the default zones.  */
805   rtl_zone = new_ggc_zone ("RTL zone");
806   tree_zone = new_ggc_zone ("Tree zone");
807   garbage_zone = new_ggc_zone ("Garbage zone");
808 
809   G.pagesize = getpagesize();
810   G.lg_pagesize = exact_log2 (G.pagesize);
811 #ifdef HAVE_MMAP_DEV_ZERO
812   G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
813   if (G.dev_zero_fd == -1)
814     abort ();
815 #endif
816 
817 #if 0
818   G.debug_file = fopen ("ggc-mmap.debug", "w");
819   setlinebuf (G.debug_file);
820 #else
821   G.debug_file = stdout;
822 #endif
823 
824 #ifdef USING_MMAP
825   /* StunOS has an amazing off-by-one error for the first mmap allocation
826      after fiddling with RLIMIT_STACK.  The result, as hard as it is to
827      believe, is an unaligned page allocation, which would cause us to
828      hork badly if we tried to use it.  */
829   {
830     char *p = alloc_anon (NULL, G.pagesize, &main_zone);
831     struct page_entry *e;
832     if ((size_t)p & (G.pagesize - 1))
833       {
834 	/* How losing.  Discard this one and try another.  If we still
835 	   can't get something useful, give up.  */
836 
837 	p = alloc_anon (NULL, G.pagesize, &main_zone);
838 	if ((size_t)p & (G.pagesize - 1))
839 	  abort ();
840       }
841 
842     /* We have a good page, might as well hold onto it...  */
843     e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
844     e->bytes = G.pagesize;
845     e->page = p;
846     e->next = main_zone.free_pages;
847     main_zone.free_pages = e;
848   }
849 #endif
850 }
851 
852 /* Start a new GGC zone.  */
853 
854 struct alloc_zone *
855 new_ggc_zone (const char * name)
856 {
857   struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
858   new_zone->name = name;
859   new_zone->next_zone = G.zones->next_zone;
860   G.zones->next_zone = new_zone;
861   return new_zone;
862 }
863 
864 /* Destroy a GGC zone.  */
865 void
866 destroy_ggc_zone (struct alloc_zone * dead_zone)
867 {
868   struct alloc_zone *z;
869 
870   for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
871     /* Just find that zone.  */ ;
872 
873 #ifdef ENABLE_CHECKING
874   /* We should have found the zone in the list.  Anything else is fatal.  */
875   if (!z)
876     abort ();
877 #endif
878 
879   /* z is dead, baby. z is dead.  */
880   z->dead= true;
881 }
882 
883 /* Increment the `GC context'.  Objects allocated in an outer context
884    are never freed, eliminating the need to register their roots.  */
885 
886 void
887 ggc_push_context (void)
888 {
889   struct alloc_zone *zone;
890   for (zone = G.zones; zone; zone = zone->next_zone)
891     ++(zone->context_depth);
892   /* Die on wrap.  */
893   if (main_zone.context_depth >= HOST_BITS_PER_LONG)
894     abort ();
895 }
896 
897 /* Decrement the `GC context'.  All objects allocated since the
898    previous ggc_push_context are migrated to the outer context.  */
899 
900 static void
901 ggc_pop_context_1 (struct alloc_zone *zone)
902 {
903   unsigned long omask;
904   unsigned depth;
905   page_entry *p;
906 
907   depth = --(zone->context_depth);
908   omask = (unsigned long)1 << (depth + 1);
909 
910   if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
911     return;
912 
913   zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
914   zone->context_depth_allocations &= omask - 1;
915   zone->context_depth_collections &= omask - 1;
916 
917   /* Any remaining pages in the popped context are lowered to the new
918      current context; i.e. objects allocated in the popped context and
919      left over are imported into the previous context.  */
920   for (p = zone->pages; p != NULL; p = p->next)
921     if (p->context_depth > depth)
922       p->context_depth = depth;
923 }
924 
925 /* Pop all the zone contexts.  */
926 
927 void
928 ggc_pop_context (void)
929 {
930   struct alloc_zone *zone;
931   for (zone = G.zones; zone; zone = zone->next_zone)
932     ggc_pop_context_1 (zone);
933 }
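
/* A typical (hypothetical) use of the context machinery brackets a
   group of allocations:

     ggc_push_context ();
     ... allocate, possibly collect ...
     ggc_pop_context ();

   While the inner context is active, objects allocated in outer
   contexts are never freed; whatever survives to the pop is migrated
   into the enclosing context.  */
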
934 /* Poison the chunk.  */
935 #ifdef ENABLE_GC_CHECKING
936 #define poison_chunk(CHUNK, SIZE) \
937   memset ((CHUNK)->u.data, 0xa5, (SIZE))
938 #else
939 #define poison_chunk(CHUNK, SIZE)
940 #endif
941 
942 /* Free all empty pages and objects within a page, for a given zone.  */
943 
944 static void
945 sweep_pages (struct alloc_zone *zone)
946 {
947   page_entry **pp, *p, *next;
948   struct alloc_chunk *chunk, *last_free, *end;
949   size_t last_free_size, allocated = 0;
950   bool nomarksinpage;
951   /* First, reset the free_chunks lists, since we are going to
952      re-free free chunks in hopes of coalescing them into large chunks.  */
953   memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
954   pp = &zone->pages;
955   for (p = zone->pages; p ; p = next)
956     {
957       next = p->next;
958       /* Large pages are all-or-nothing affairs: either they are
959 	 completely empty or completely full.
960 
961 	 XXX: Should we bother to increment allocated?  */
962       if (p->large_p)
963 	{
964 	  if (((struct alloc_chunk *)p->page)->mark == 1)
965 	    {
966 	      ((struct alloc_chunk *)p->page)->mark = 0;
967 	    }
968 	  else
969 	    {
970 	      *pp = next;
971 #ifdef ENABLE_GC_CHECKING
972 	      /* Poison the page.  */
973 	      memset (p->page, 0xb5, p->bytes);
974 #endif
975 	      free_page (p);
976 	    }
977 	  continue;
978 	}
979 
980       /* This page has now survived another collection.  */
981       p->survived++;
982 
983       /* Which leaves full and partial pages.  Step through all chunks,
984 	 consolidate those that are free and insert them into the free
985 	 lists.  Note that consolidation slows down collection
986 	 slightly.  */
987 
988       chunk = (struct alloc_chunk *)p->page;
989       end = (struct alloc_chunk *)(p->page + G.pagesize);
990       last_free = NULL;
991       last_free_size = 0;
992       nomarksinpage = true;
993       do
994 	{
995 	  prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
996 	  if (chunk->mark || p->context_depth < zone->context_depth)
997 	    {
998 	      nomarksinpage = false;
999 	      if (last_free)
1000 		{
1001 		  last_free->type = 0;
1002 		  last_free->size = last_free_size;
1003 		  last_free->mark = 0;
1004 		  poison_chunk (last_free, last_free_size);
1005 		  free_chunk (last_free, last_free_size, zone);
1006 		  last_free = NULL;
1007 		}
1008 	      if (chunk->mark)
1009 	        {
1010 	          allocated += chunk->size + CHUNK_OVERHEAD;
1011 		}
1012 	      chunk->mark = 0;
1013 	    }
1014 	  else
1015 	    {
1016 	      if (last_free)
1017 	        {
1018 		  last_free_size += CHUNK_OVERHEAD + chunk->size;
1019 		}
1020 	      else
1021 		{
1022 		  last_free = chunk;
1023 		  last_free_size = chunk->size;
1024 		}
1025 	    }
1026 
1027 	  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1028 	}
1029       while (chunk < end);
1030 
1031       if (nomarksinpage)
1032 	{
1033 	  *pp = next;
1034 #ifdef ENABLE_GC_CHECKING
1035 	  /* Poison the page.  */
1036 	  memset (p->page, 0xb5, p->bytes);
1037 #endif
1038 	  free_page (p);
1039 	  continue;
1040 	}
1041       else if (last_free)
1042 	{
1043 	  last_free->type = 0;
1044 	  last_free->size = last_free_size;
1045 	  last_free->mark = 0;
1046 	  poison_chunk (last_free, last_free_size);
1047 	  free_chunk (last_free, last_free_size, zone);
1048 	}
1049       pp = &p->next;
1050     }
1051 
1052   zone->allocated = allocated;
1053 }
1054 
1055 /* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
1056    is true if we need to mark before sweeping, false if some other
1057    zone collection has already performed marking for us.  Returns true
1058    if we collected, false otherwise.  */
1059 
1060 static bool
1061 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1062 {
1063   if (!zone->dead)
1064     {
1065       /* Avoid frequent unnecessary work by skipping collection if the
1066 	 total allocations haven't expanded much since the last
1067 	 collection.  */
1068       float allocated_last_gc =
1069 	MAX (zone->allocated_last_gc,
1070 	     (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1071 
1072       float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1073 
1074       if (zone->allocated < allocated_last_gc + min_expand)
1075 	return false;
1076     }
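
  /* For example, with hypothetical parameter values GGC_MIN_HEAPSIZE
     == 4096 (kB) and GGC_MIN_EXPAND == 30 (%), a live zone that ended
     its last collection with 1 MB allocated is skipped above until its
     allocation grows past 4 MB + 1.2 MB.  */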
1077 
1078   if (!quiet_flag)
1079     fprintf (stderr, " {%s GC %luk -> ",
1080 	     zone->name, (unsigned long) zone->allocated / 1024);
1081 
1082   /* Zero the total allocated bytes.  This will be recalculated in the
1083      sweep phase.  */
1084   zone->allocated = 0;
1085 
1086   /* Release the pages we freed the last time we collected, but didn't
1087      reuse in the interim.  */
1088   release_pages (zone);
1089 
1090   /* Indicate that we've seen collections at this context depth.  */
1091   zone->context_depth_collections
1092     = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
1093   if (need_marking)
1094     ggc_mark_roots ();
1095   sweep_pages (zone);
1096   zone->was_collected = true;
1097   zone->allocated_last_gc = zone->allocated;
1098 
1099   if (!quiet_flag)
1100     fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1101   return true;
1102 }
1103 
1104 /* Calculate the average page survival rate in terms of number of
1105    collections.  */
1106 
1107 static float
1108 calculate_average_page_survival (struct alloc_zone *zone)
1109 {
1110   float count = 0.0;
1111   float survival = 0.0;
1112   page_entry *p;
1113   for (p = zone->pages; p; p = p->next)
1114     {
1115       count += 1.0;
1116       survival += p->survived;
1117     }
1118   return survival/count;
1119 }
1120 
1121 /* Check the magic cookies all of the chunks contain, to make sure we
1122    aren't doing anything stupid, like stomping on alloc_chunk
1123    structures.  */
1124 
1125 static inline void
1126 check_cookies (void)
1127 {
1128 #ifdef COOKIE_CHECKING
1129   page_entry *p;
1130   struct alloc_zone *zone;
1131 
1132   for (zone = G.zones; zone; zone = zone->next_zone)
1133     {
1134       for (p = zone->pages; p; p = p->next)
1135 	{
1136 	  if (!p->large_p)
1137 	    {
1138 	      struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1139 	      struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1140 	      do
1141 		{
1142 		  if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
1143 		    abort ();
1144 		  chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1145 		}
1146 	      while (chunk < end);
1147 	    }
1148 	}
1149     }
1150 #endif
1151 }
1152 /* Top level collection routine.  */
1153 
1154 void
1155 ggc_collect (void)
1156 {
1157   struct alloc_zone *zone;
1158   bool marked = false;
1159   float f;
1160 
1161   timevar_push (TV_GC);
1162   check_cookies ();
1163   /* Start by possibly collecting the main zone.  */
1164   main_zone.was_collected = false;
1165   marked |= ggc_collect_1 (&main_zone, true);
1166 
1167   /* In order to keep the number of collections down, we don't
1168      collect other zones unless we are collecting the main zone.  This
1169      gives us roughly the same number of collections as we used to
1170      have with the old gc.  The number of collections is important
1171      because our main slowdown (according to profiling) is now in
1172      marking.  So if we mark twice as often as we used to, we'll be
1173      twice as slow.  Hopefully we'll avoid this cost when we mark
1174      zone-at-a-time.  */
1175 
1176   if (main_zone.was_collected)
1177     {
1178       struct alloc_zone *zone;
1179 
1180       for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
1181 	{
1182 	  check_cookies ();
1183 	  zone->was_collected = false;
1184 	  marked |= ggc_collect_1 (zone, !marked);
1185 	}
1186     }
1187 
1188   /* Print page survival stats, if someone wants them.  */
1189   if (GGC_DEBUG_LEVEL >= 2)
1190     {
1191       for (zone = G.zones; zone; zone = zone->next_zone)
1192 	{
1193 	  if (zone->was_collected)
1194 	    {
1195 	      f = calculate_average_page_survival (zone);
1196 	      printf ("Average page survival in zone `%s' is %f\n",
1197 		      zone->name, f);
1198 	    }
1199 	}
1200     }
1201 
1202   /* Since we don't mark zones one at a time right now, marking in any
1203      zone means marking in every zone.  So we have to clear all the
1204      marks in all the zones that weren't collected already.  */
1205   if (marked)
1206     {
1207       page_entry *p;
1208       for (zone = G.zones; zone; zone = zone->next_zone)
1209       {
1210 	if (zone->was_collected)
1211 	  continue;
1212 	for (p = zone->pages; p; p = p->next)
1213 	  {
1214 	    if (!p->large_p)
1215 	      {
1216 		struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
1217 		struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
1218 		do
1219 		  {
1220 		    prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
1221 		    if (chunk->mark || p->context_depth < zone->context_depth)
1222 		      {
1223 			chunk->mark = 0;
1224 		      }
1225 		    chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
1226 		  }
1227 		while (chunk < end);
1228 	      }
1229 	    else
1230 	      {
1231 		((struct alloc_chunk *)p->page)->mark = 0;
1232 	      }
1233 	  }
1234       }
1235     }
1236 
1237   /* Free dead zones.  */
1238   for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
1239     {
1240       if (zone->next_zone->dead)
1241 	{
1242 	  struct alloc_zone *dead_zone = zone->next_zone;
1243 
1244 	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
1245 
1246 	  /* The zone must be empty.  */
1247 	  if (dead_zone->allocated != 0)
1248 	    abort ();
1249 
1250 	  /* Unchain the dead zone, release all its pages and free it.  */
1251 	  zone->next_zone = zone->next_zone->next_zone;
1252 	  release_pages (dead_zone);
1253 	  free (dead_zone);
1254 	}
1255     }
1256 
1257   timevar_pop (TV_GC);
1258 }
1259 
1260 /* Print allocation statistics.  */
1261 
1262 void
1263 ggc_print_statistics (void)
1264 {
1265 }
1266 
1267 struct ggc_pch_data
1268 {
1269   struct ggc_pch_ondisk
1270   {
1271     unsigned total;
1272   } d;
1273   size_t base;
1274   size_t written;
1275 };
1276 
1277 /* Initialize the PCH datastructure.  */
1278 
1279 struct ggc_pch_data *
1280 init_ggc_pch (void)
1281 {
1282   return xcalloc (sizeof (struct ggc_pch_data), 1);
1283 }
1284 
1285 /* Add the size of object X to the size of the PCH data.  */
1286 
1287 void
1288 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
1289 		      size_t size, bool is_string)
1290 {
1291   if (!is_string)
1292     {
1293       d->d.total += size + CHUNK_OVERHEAD;
1294     }
1295   else
1296     d->d.total += size;
1297 }
1298 
1299 /* Return the total size of the PCH data.  */
1300 
1301 size_t
1302 ggc_pch_total_size (struct ggc_pch_data *d)
1303 {
1304   return d->d.total;
1305 }
1306 
1307 /* Set the base address for the objects in the PCH file.  */
1308 
1309 void
1310 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
1311 {
1312   d->base = (size_t) base;
1313 }
1314 
1315 /* Allocate a place for object X of size SIZE in the PCH file.  */
1316 
1317 char *
1318 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
1319 		      size_t size, bool is_string)
1320 {
1321   char *result;
1322   result = (char *)d->base;
1323   if (!is_string)
1324     {
1325       struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1326       if (chunk->large)
1327 	d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
1328       else
1329 	d->base += chunk->size + CHUNK_OVERHEAD;
1330       return result + CHUNK_OVERHEAD;
1331     }
1332   else
1333     {
1334       d->base += size;
1335       return result;
1336     }
1337 
1338 }
1339 
1340 /* Prepare to write out the PCH data to file F.  */
1341 
1342 void
1343 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1344 		       FILE *f ATTRIBUTE_UNUSED)
1345 {
1346   /* Nothing to do.  */
1347 }
1348 
1349 /* Write out object X of SIZE to file F.  */
1350 
1351 void
1352 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
1353 		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
1354 		      size_t size, bool is_string)
1355 {
1356   if (!is_string)
1357     {
1358       struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
1359       size = ggc_get_size (x);
1360       if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
1361 	fatal_error ("can't write PCH file: %m");
1362       d->written += size + CHUNK_OVERHEAD;
1363     }
1364   else
1365     {
1366       if (fwrite (x, size, 1, f) != 1)
1367 	fatal_error ("can't write PCH file: %m");
1368       d->written += size;
1369     }
1370   if (d->written == d->d.total
1371       && fseek (f, ROUND_UP_VALUE (d->d.total, G.pagesize), SEEK_CUR) != 0)
1372     fatal_error ("can't write PCH file: %m");
1373 }
1374 
1375 void
1376 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
1377 {
1378   if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
1379     fatal_error ("can't write PCH file: %m");
1380   free (d);
1381 }
1382 void
1383 ggc_pch_read (FILE *f, void *addr)
1384 {
1385   struct ggc_pch_ondisk d;
1386   struct page_entry *entry;
1387   struct alloc_zone *pch_zone;
1388   if (fread (&d, sizeof (d), 1, f) != 1)
1389     fatal_error ("can't read PCH file: %m");
1390   entry = xcalloc (1, sizeof (struct page_entry));
1391   entry->bytes = d.total;
1392   entry->page = addr;
1393   entry->context_depth = 0;
1394   pch_zone = new_ggc_zone ("PCH zone");
1395   entry->zone = pch_zone;
1396   entry->next = entry->zone->pages;
1397   entry->zone->pages = entry;
1398 }
1399